repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
sequencelengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
sequencelengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
opencobra/memote
memote/support/annotation.py
generate_component_annotation_miriam_match
def generate_component_annotation_miriam_match(elements, component, db):
    """
    Tabulate which MIRIAM databases the element's annotation match.

    If the relevant MIRIAM identifier is not in an element's annotation it is
    ignored.

    Parameters
    ----------
    elements : list
        Elements of a model, either metabolites or reactions.
    component : {"metabolites", "reactions"}
        A string denoting a type of ``cobra.Model`` component.
    db : str
        One of the MIRIAM database identifiers.

    Returns
    -------
    list
        The components whose annotation does not match the pattern for the
        MIRIAM database.

    """
    # Select the compiled pattern for the requested component type and
    # database identifier.
    pattern_map = {
        "metabolites": METABOLITE_ANNOTATIONS,
        "reactions": REACTION_ANNOTATIONS,
        "genes": GENE_PRODUCT_ANNOTATIONS
    }
    pattern = pattern_map[component][db]

    def is_faulty(annotation, key, pattern):
        # Elements without an entry for this database are not reported.
        if key not in annotation:
            return False
        value = annotation[key]
        if isinstance(value, native_str):
            # A single identifier string must match the pattern.
            return pattern.match(value) is None
        # Otherwise assume an iterable of identifiers; the element is
        # faulty if any one of them fails to match.
        return any(pattern.match(item) is None for item in value)

    return [elem for elem in elements
            if is_faulty(elem.annotation, db, pattern)]
python
def generate_component_annotation_miriam_match(elements, component, db): """ Tabulate which MIRIAM databases the element's annotation match. If the relevant MIRIAM identifier is not in an element's annotation it is ignored. Parameters ---------- elements : list Elements of a model, either metabolites or reactions. component : {"metabolites", "reactions"} A string denoting a type of ``cobra.Model`` component. db : str One of the MIRIAM database identifiers. Returns ------- list The components whose annotation does not match the pattern for the MIRIAM database. """ def is_faulty(annotation, key, pattern): # Ignore missing annotation for this database. if key not in annotation: return False test = annotation[key] if isinstance(test, native_str): return pattern.match(test) is None else: return any(pattern.match(elem) is None for elem in test) pattern = { "metabolites": METABOLITE_ANNOTATIONS, "reactions": REACTION_ANNOTATIONS, "genes": GENE_PRODUCT_ANNOTATIONS }[component][db] return [elem for elem in elements if is_faulty(elem.annotation, db, pattern)]
[ "def", "generate_component_annotation_miriam_match", "(", "elements", ",", "component", ",", "db", ")", ":", "def", "is_faulty", "(", "annotation", ",", "key", ",", "pattern", ")", ":", "# Ignore missing annotation for this database.", "if", "key", "not", "in", "annotation", ":", "return", "False", "test", "=", "annotation", "[", "key", "]", "if", "isinstance", "(", "test", ",", "native_str", ")", ":", "return", "pattern", ".", "match", "(", "test", ")", "is", "None", "else", ":", "return", "any", "(", "pattern", ".", "match", "(", "elem", ")", "is", "None", "for", "elem", "in", "test", ")", "pattern", "=", "{", "\"metabolites\"", ":", "METABOLITE_ANNOTATIONS", ",", "\"reactions\"", ":", "REACTION_ANNOTATIONS", ",", "\"genes\"", ":", "GENE_PRODUCT_ANNOTATIONS", "}", "[", "component", "]", "[", "db", "]", "return", "[", "elem", "for", "elem", "in", "elements", "if", "is_faulty", "(", "elem", ".", "annotation", ",", "db", ",", "pattern", ")", "]" ]
Tabulate which MIRIAM databases the element's annotation match. If the relevant MIRIAM identifier is not in an element's annotation it is ignored. Parameters ---------- elements : list Elements of a model, either metabolites or reactions. component : {"metabolites", "reactions"} A string denoting a type of ``cobra.Model`` component. db : str One of the MIRIAM database identifiers. Returns ------- list The components whose annotation does not match the pattern for the MIRIAM database.
[ "Tabulate", "which", "MIRIAM", "databases", "the", "element", "s", "annotation", "match", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/annotation.py#L167-L206
opencobra/memote
memote/support/annotation.py
generate_component_id_namespace_overview
def generate_component_id_namespace_overview(model, components):
    """
    Tabulate which MIRIAM databases the component's identifier matches.

    Parameters
    ----------
    model : cobra.Model
        A cobrapy metabolic model.
    components : {"metabolites", "reactions", "genes"}
        A string denoting `cobra.Model` components.

    Returns
    -------
    pandas.DataFrame
        The index of the table is given by the component identifiers. Each
        column corresponds to one MIRIAM database and a Boolean entry
        determines whether the annotation matches.

    """
    patterns = {
        "metabolites": METABOLITE_ANNOTATIONS,
        "reactions": REACTION_ANNOTATIONS,
        "genes": GENE_PRODUCT_ANNOTATIONS
    }[components]
    databases = list(patterns)
    data = list()
    index = list()
    for elem in getattr(model, components):
        index.append(elem.id)
        data.append(tuple(patterns[db].match(elem.id) is not None
                          for db in databases))
    df = pd.DataFrame(data, index=index, columns=databases)
    if components != "genes":
        # Clean up of the dataframe. Unfortunately the Biocyc patterns match
        # broadly. Hence, whenever a Metabolite or Reaction ID matches to any
        # DB pattern AND the Biocyc pattern we have to assume that this is a
        # false positive.
        # Compute the mask directly instead of the previous temporary
        # 'duplicate' column: that approach relied on NaN index alignment
        # followed by an in-place ``fillna`` on a column view, a chained
        # assignment pattern that is deprecated and silently broken under
        # pandas copy-on-write.  A row is a false positive when 'biocyc'
        # matches together with at least one other database (the row sum
        # counts 'biocyc' itself, hence >= 2).
        false_positive = df["biocyc"] & (df.sum(axis=1) >= 2)
        df.loc[false_positive, "biocyc"] = False
    return df
python
def generate_component_id_namespace_overview(model, components): """ Tabulate which MIRIAM databases the component's identifier matches. Parameters ---------- model : cobra.Model A cobrapy metabolic model. components : {"metabolites", "reactions", "genes"} A string denoting `cobra.Model` components. Returns ------- pandas.DataFrame The index of the table is given by the component identifiers. Each column corresponds to one MIRIAM database and a Boolean entry determines whether the annotation matches. """ patterns = { "metabolites": METABOLITE_ANNOTATIONS, "reactions": REACTION_ANNOTATIONS, "genes": GENE_PRODUCT_ANNOTATIONS }[components] databases = list(patterns) data = list() index = list() for elem in getattr(model, components): index.append(elem.id) data.append(tuple(patterns[db].match(elem.id) is not None for db in databases)) df = pd.DataFrame(data, index=index, columns=databases) if components != "genes": # Clean up of the dataframe. Unfortunately the Biocyc patterns match # broadly. Hence, whenever a Metabolite or Reaction ID matches to any # DB pattern AND the Biocyc pattern we have to assume that this is a # false positive. # First determine all rows in which 'biocyc' and other entries are # True simultaneously and use this Boolean series to create another # column temporarily. df['duplicate'] = df[df['biocyc']].sum(axis=1) >= 2 # Replace all nan values with False df['duplicate'].fillna(False, inplace=True) # Use the additional column to index the original dataframe to identify # false positive biocyc hits and set them to False. df.loc[df['duplicate'], 'biocyc'] = False # Delete the additional column del df['duplicate'] return df
[ "def", "generate_component_id_namespace_overview", "(", "model", ",", "components", ")", ":", "patterns", "=", "{", "\"metabolites\"", ":", "METABOLITE_ANNOTATIONS", ",", "\"reactions\"", ":", "REACTION_ANNOTATIONS", ",", "\"genes\"", ":", "GENE_PRODUCT_ANNOTATIONS", "}", "[", "components", "]", "databases", "=", "list", "(", "patterns", ")", "data", "=", "list", "(", ")", "index", "=", "list", "(", ")", "for", "elem", "in", "getattr", "(", "model", ",", "components", ")", ":", "index", ".", "append", "(", "elem", ".", "id", ")", "data", ".", "append", "(", "tuple", "(", "patterns", "[", "db", "]", ".", "match", "(", "elem", ".", "id", ")", "is", "not", "None", "for", "db", "in", "databases", ")", ")", "df", "=", "pd", ".", "DataFrame", "(", "data", ",", "index", "=", "index", ",", "columns", "=", "databases", ")", "if", "components", "!=", "\"genes\"", ":", "# Clean up of the dataframe. Unfortunately the Biocyc patterns match", "# broadly. Hence, whenever a Metabolite or Reaction ID matches to any", "# DB pattern AND the Biocyc pattern we have to assume that this is a", "# false positive.", "# First determine all rows in which 'biocyc' and other entries are", "# True simultaneously and use this Boolean series to create another", "# column temporarily.", "df", "[", "'duplicate'", "]", "=", "df", "[", "df", "[", "'biocyc'", "]", "]", ".", "sum", "(", "axis", "=", "1", ")", ">=", "2", "# Replace all nan values with False", "df", "[", "'duplicate'", "]", ".", "fillna", "(", "False", ",", "inplace", "=", "True", ")", "# Use the additional column to index the original dataframe to identify", "# false positive biocyc hits and set them to False.", "df", ".", "loc", "[", "df", "[", "'duplicate'", "]", ",", "'biocyc'", "]", "=", "False", "# Delete the additional column", "del", "df", "[", "'duplicate'", "]", "return", "df" ]
Tabulate which MIRIAM databases the component's identifier matches. Parameters ---------- model : cobra.Model A cobrapy metabolic model. components : {"metabolites", "reactions", "genes"} A string denoting `cobra.Model` components. Returns ------- pandas.DataFrame The index of the table is given by the component identifiers. Each column corresponds to one MIRIAM database and a Boolean entry determines whether the annotation matches.
[ "Tabulate", "which", "MIRIAM", "databases", "the", "component", "s", "identifier", "matches", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/annotation.py#L209-L257
opencobra/memote
memote/support/essentiality.py
confusion_matrix
def confusion_matrix(predicted_essential, expected_essential,
                     predicted_nonessential, expected_nonessential):
    """
    Compute a representation of the confusion matrix.

    Parameters
    ----------
    predicted_essential : set
    expected_essential : set
    predicted_nonessential : set
    expected_nonessential : set

    Returns
    -------
    dict
        Confusion matrix as different keys of a dictionary. The abbreviated
        keys correspond to the ones used in [1]_.

    References
    ----------
    .. [1] `Wikipedia entry for the Confusion matrix
       <https://en.wikipedia.org/wiki/Confusion_matrix>`_

    """
    true_positive = predicted_essential & expected_essential
    tp = len(true_positive)
    true_negative = predicted_nonessential & expected_nonessential
    tn = len(true_negative)
    false_positive = predicted_essential - expected_essential
    fp = len(false_positive)
    false_negative = predicted_nonessential - expected_nonessential
    fn = len(false_negative)
    # sensitivity or true positive rate
    try:
        tpr = tp / (tp + fn)
    except ZeroDivisionError:
        tpr = None
    # specificity or true negative rate
    try:
        tnr = tn / (tn + fp)
    except ZeroDivisionError:
        tnr = None
    # precision or positive predictive value
    try:
        ppv = tp / (tp + fp)
    except ZeroDivisionError:
        ppv = None
    # false discovery rate; undefined whenever the precision is undefined.
    # (Previously ``1 - ppv`` raised a TypeError when ``ppv`` was None.)
    fdr = None if ppv is None else 1 - ppv
    # accuracy
    try:
        acc = (tp + tn) / (tp + tn + fp + fn)
    except ZeroDivisionError:
        acc = None
    # Compute Matthews correlation coefficient.
    try:
        mcc = (tp * tn - fp * fn) /\
            sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    except ZeroDivisionError:
        mcc = None
    return {
        "TP": list(true_positive),
        "TN": list(true_negative),
        "FP": list(false_positive),
        "FN": list(false_negative),
        "TPR": tpr,
        "TNR": tnr,
        "PPV": ppv,
        "FDR": fdr,
        "ACC": acc,
        "MCC": mcc
    }
python
def confusion_matrix(predicted_essential, expected_essential, predicted_nonessential, expected_nonessential): """ Compute a representation of the confusion matrix. Parameters ---------- predicted_essential : set expected_essential : set predicted_nonessential : set expected_nonessential : set Returns ------- dict Confusion matrix as different keys of a dictionary. The abbreviated keys correspond to the ones used in [1]_. References ---------- .. [1] `Wikipedia entry for the Confusion matrix <https://en.wikipedia.org/wiki/Confusion_matrix>`_ """ true_positive = predicted_essential & expected_essential tp = len(true_positive) true_negative = predicted_nonessential & expected_nonessential tn = len(true_negative) false_positive = predicted_essential - expected_essential fp = len(false_positive) false_negative = predicted_nonessential - expected_nonessential fn = len(false_negative) # sensitivity or true positive rate try: tpr = tp / (tp + fn) except ZeroDivisionError: tpr = None # specificity or true negative rate try: tnr = tn / (tn + fp) except ZeroDivisionError: tnr = None # precision or positive predictive value try: ppv = tp / (tp + fp) except ZeroDivisionError: ppv = None # false discovery rate fdr = 1 - ppv # accuracy try: acc = (tp + tn) / (tp + tn + fp + fn) except ZeroDivisionError: acc = None # Compute Matthews correlation coefficient. try: mcc = (tp * tn - fp * fn) /\ sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)) except ZeroDivisionError: mcc = None return { "TP": list(true_positive), "TN": list(true_negative), "FP": list(false_positive), "FN": list(false_negative), "TPR": tpr, "TNR": tnr, "PPV": ppv, "FDR": fdr, "ACC": acc, "MCC": mcc }
[ "def", "confusion_matrix", "(", "predicted_essential", ",", "expected_essential", ",", "predicted_nonessential", ",", "expected_nonessential", ")", ":", "true_positive", "=", "predicted_essential", "&", "expected_essential", "tp", "=", "len", "(", "true_positive", ")", "true_negative", "=", "predicted_nonessential", "&", "expected_nonessential", "tn", "=", "len", "(", "true_negative", ")", "false_positive", "=", "predicted_essential", "-", "expected_essential", "fp", "=", "len", "(", "false_positive", ")", "false_negative", "=", "predicted_nonessential", "-", "expected_nonessential", "fn", "=", "len", "(", "false_negative", ")", "# sensitivity or true positive rate", "try", ":", "tpr", "=", "tp", "/", "(", "tp", "+", "fn", ")", "except", "ZeroDivisionError", ":", "tpr", "=", "None", "# specificity or true negative rate", "try", ":", "tnr", "=", "tn", "/", "(", "tn", "+", "fp", ")", "except", "ZeroDivisionError", ":", "tnr", "=", "None", "# precision or positive predictive value", "try", ":", "ppv", "=", "tp", "/", "(", "tp", "+", "fp", ")", "except", "ZeroDivisionError", ":", "ppv", "=", "None", "# false discovery rate", "fdr", "=", "1", "-", "ppv", "# accuracy", "try", ":", "acc", "=", "(", "tp", "+", "tn", ")", "/", "(", "tp", "+", "tn", "+", "fp", "+", "fn", ")", "except", "ZeroDivisionError", ":", "acc", "=", "None", "# Compute Matthews correlation coefficient.", "try", ":", "mcc", "=", "(", "tp", "*", "tn", "-", "fp", "*", "fn", ")", "/", "sqrt", "(", "(", "tp", "+", "fp", ")", "*", "(", "tp", "+", "fn", ")", "*", "(", "tn", "+", "fp", ")", "*", "(", "tn", "+", "fn", ")", ")", "except", "ZeroDivisionError", ":", "mcc", "=", "None", "return", "{", "\"TP\"", ":", "list", "(", "true_positive", ")", ",", "\"TN\"", ":", "list", "(", "true_negative", ")", ",", "\"FP\"", ":", "list", "(", "false_positive", ")", ",", "\"FN\"", ":", "list", "(", "false_negative", ")", ",", "\"TPR\"", ":", "tpr", ",", "\"TNR\"", ":", "tnr", ",", "\"PPV\"", ":", "ppv", ",", "\"FDR\"", 
":", "fdr", ",", "\"ACC\"", ":", "acc", ",", "\"MCC\"", ":", "mcc", "}" ]
Compute a representation of the confusion matrix. Parameters ---------- predicted_essential : set expected_essential : set predicted_nonessential : set expected_nonessential : set Returns ------- dict Confusion matrix as different keys of a dictionary. The abbreviated keys correspond to the ones used in [1]_. References ---------- .. [1] `Wikipedia entry for the Confusion matrix <https://en.wikipedia.org/wiki/Confusion_matrix>`_
[ "Compute", "a", "representation", "of", "the", "confusion", "matrix", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/essentiality.py#L29-L100
opencobra/memote
memote/suite/api.py
validate_model
def validate_model(path):
    """
    Validate a model structurally and optionally store results as JSON.

    Parameters
    ----------
    path :
        Path to model file.

    Returns
    -------
    tuple
        cobra.Model
            The metabolic model under investigation.
        tuple
            A tuple reporting on the SBML level, version, and FBC package
            version used (if any) in the SBML document.
        dict
            A simple dictionary containing a list of errors and warnings.

    """
    # The validator fills these two buckets in place while loading.
    notifications = dict(warnings=[], errors=[])
    model, sbml_ver = val.load_cobra_model(path, notifications)
    return model, sbml_ver, notifications
python
def validate_model(path): """ Validate a model structurally and optionally store results as JSON. Parameters ---------- path : Path to model file. Returns ------- tuple cobra.Model The metabolic model under investigation. tuple A tuple reporting on the SBML level, version, and FBC package version used (if any) in the SBML document. dict A simple dictionary containing a list of errors and warnings. """ notifications = {"warnings": [], "errors": []} model, sbml_ver = val.load_cobra_model(path, notifications) return model, sbml_ver, notifications
[ "def", "validate_model", "(", "path", ")", ":", "notifications", "=", "{", "\"warnings\"", ":", "[", "]", ",", "\"errors\"", ":", "[", "]", "}", "model", ",", "sbml_ver", "=", "val", ".", "load_cobra_model", "(", "path", ",", "notifications", ")", "return", "model", ",", "sbml_ver", ",", "notifications" ]
Validate a model structurally and optionally store results as JSON. Parameters ---------- path : Path to model file. Returns ------- tuple cobra.Model The metabolic model under investigation. tuple A tuple reporting on the SBML level, version, and FBC package version used (if any) in the SBML document. dict A simple dictionary containing a list of errors and warnings.
[ "Validate", "a", "model", "structurally", "and", "optionally", "store", "results", "as", "JSON", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/api.py#L41-L64
opencobra/memote
memote/suite/api.py
snapshot_report
def snapshot_report(result, config=None, html=True):
    """
    Generate a snapshot report from a result set and configuration.

    Parameters
    ----------
    result : memote.MemoteResult
        Nested dictionary structure as returned from the test suite.
    config : dict, optional
        The final test report configuration (default None).
    html : bool, optional
        Whether to render the report as full HTML or JSON (default True).

    """
    # Fall back to the packaged default configuration when none is given.
    config = ReportConfiguration.load() if config is None else config
    report = SnapshotReport(result=result, configuration=config)
    return report.render_html() if html else report.render_json()
python
def snapshot_report(result, config=None, html=True): """ Generate a snapshot report from a result set and configuration. Parameters ---------- result : memote.MemoteResult Nested dictionary structure as returned from the test suite. config : dict, optional The final test report configuration (default None). html : bool, optional Whether to render the report as full HTML or JSON (default True). """ if config is None: config = ReportConfiguration.load() report = SnapshotReport(result=result, configuration=config) if html: return report.render_html() else: return report.render_json()
[ "def", "snapshot_report", "(", "result", ",", "config", "=", "None", ",", "html", "=", "True", ")", ":", "if", "config", "is", "None", ":", "config", "=", "ReportConfiguration", ".", "load", "(", ")", "report", "=", "SnapshotReport", "(", "result", "=", "result", ",", "configuration", "=", "config", ")", "if", "html", ":", "return", "report", ".", "render_html", "(", ")", "else", ":", "return", "report", ".", "render_json", "(", ")" ]
Generate a snapshot report from a result set and configuration. Parameters ---------- result : memote.MemoteResult Nested dictionary structure as returned from the test suite. config : dict, optional The final test report configuration (default None). html : bool, optional Whether to render the report as full HTML or JSON (default True).
[ "Generate", "a", "snapshot", "report", "from", "a", "result", "set", "and", "configuration", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/api.py#L112-L132
opencobra/memote
memote/suite/api.py
history_report
def history_report(history, config=None, html=True):
    """
    Test a model and save a history report.

    Parameters
    ----------
    history : memote.HistoryManager
        The manager grants access to previous results.
    config : dict, optional
        The final test report configuration.
    html : bool, optional
        Whether to render the report as full HTML or JSON (default True).

    """
    # Fall back to the packaged default configuration when none is given.
    config = ReportConfiguration.load() if config is None else config
    report = HistoryReport(history=history, configuration=config)
    return report.render_html() if html else report.render_json()
python
def history_report(history, config=None, html=True): """ Test a model and save a history report. Parameters ---------- history : memote.HistoryManager The manager grants access to previous results. config : dict, optional The final test report configuration. html : bool, optional Whether to render the report as full HTML or JSON (default True). """ if config is None: config = ReportConfiguration.load() report = HistoryReport(history=history, configuration=config) if html: return report.render_html() else: return report.render_json()
[ "def", "history_report", "(", "history", ",", "config", "=", "None", ",", "html", "=", "True", ")", ":", "if", "config", "is", "None", ":", "config", "=", "ReportConfiguration", ".", "load", "(", ")", "report", "=", "HistoryReport", "(", "history", "=", "history", ",", "configuration", "=", "config", ")", "if", "html", ":", "return", "report", ".", "render_html", "(", ")", "else", ":", "return", "report", ".", "render_json", "(", ")" ]
Test a model and save a history report. Parameters ---------- history : memote.HistoryManager The manager grants access to previous results. config : dict, optional The final test report configuration. html : bool, optional Whether to render the report as full HTML or JSON (default True).
[ "Test", "a", "model", "and", "save", "a", "history", "report", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/api.py#L135-L155
opencobra/memote
memote/suite/api.py
diff_report
def diff_report(diff_results, config=None, html=True):
    """
    Generate a diff report from a result set and configuration.

    Parameters
    ----------
    diff_results : iterable of memote.MemoteResult
        Nested dictionary structure as returned from the test suite.
    config : dict, optional
        The final test report configuration (default None).
    html : bool, optional
        Whether to render the report as full HTML or JSON (default True).

    """
    # Fall back to the packaged default configuration when none is given.
    config = ReportConfiguration.load() if config is None else config
    report = DiffReport(diff_results=diff_results, configuration=config)
    return report.render_html() if html else report.render_json()
python
def diff_report(diff_results, config=None, html=True): """ Generate a diff report from a result set and configuration. Parameters ---------- diff_results : iterable of memote.MemoteResult Nested dictionary structure as returned from the test suite. config : dict, optional The final test report configuration (default None). html : bool, optional Whether to render the report as full HTML or JSON (default True). """ if config is None: config = ReportConfiguration.load() report = DiffReport(diff_results=diff_results, configuration=config) if html: return report.render_html() else: return report.render_json()
[ "def", "diff_report", "(", "diff_results", ",", "config", "=", "None", ",", "html", "=", "True", ")", ":", "if", "config", "is", "None", ":", "config", "=", "ReportConfiguration", ".", "load", "(", ")", "report", "=", "DiffReport", "(", "diff_results", "=", "diff_results", ",", "configuration", "=", "config", ")", "if", "html", ":", "return", "report", ".", "render_html", "(", ")", "else", ":", "return", "report", ".", "render_json", "(", ")" ]
Generate a diff report from a result set and configuration. Parameters ---------- diff_results : iterable of memote.MemoteResult Nested dictionary structure as returned from the test suite. config : dict, optional The final test report configuration (default None). html : bool, optional Whether to render the report as full HTML or JSON (default True).
[ "Generate", "a", "diff", "report", "from", "a", "result", "set", "and", "configuration", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/api.py#L158-L178
opencobra/memote
memote/suite/api.py
validation_report
def validation_report(path, notifications, filename):
    """
    Generate a validation report from a notification object.

    Parameters
    ----------
    path : string
        Path to model file.
    notifications : dict
        A simple dictionary structure containing a list of errors and
        warnings.

    """
    # Render the packaged Jinja2 template with auto-escaping for markup.
    env = Environment(
        loader=PackageLoader('memote.suite', 'templates'),
        autoescape=select_autoescape(['html', 'xml'])
    )
    # Only the file name of the model, not its full path, is shown.
    model = os.path.basename(path)
    template = env.get_template('validation_template.html')
    with open(filename, "w") as file_h:
        file_h.write(template.render(model=model, notifications=notifications))
python
def validation_report(path, notifications, filename): """ Generate a validation report from a notification object. Parameters ---------- path : string Path to model file. notifications : dict A simple dictionary structure containing a list of errors and warnings. """ env = Environment( loader=PackageLoader('memote.suite', 'templates'), autoescape=select_autoescape(['html', 'xml']) ) template = env.get_template('validation_template.html') model = os.path.basename(path) with open(filename, "w") as file_h: file_h.write(template.render(model=model, notifications=notifications))
[ "def", "validation_report", "(", "path", ",", "notifications", ",", "filename", ")", ":", "env", "=", "Environment", "(", "loader", "=", "PackageLoader", "(", "'memote.suite'", ",", "'templates'", ")", ",", "autoescape", "=", "select_autoescape", "(", "[", "'html'", ",", "'xml'", "]", ")", ")", "template", "=", "env", ".", "get_template", "(", "'validation_template.html'", ")", "model", "=", "os", ".", "path", ".", "basename", "(", "path", ")", "with", "open", "(", "filename", ",", "\"w\"", ")", "as", "file_h", ":", "file_h", ".", "write", "(", "template", ".", "render", "(", "model", "=", "model", ",", "notifications", "=", "notifications", ")", ")" ]
Generate a validation report from a notification object. Parameters ---------- path : string Path to model file. notifications : dict A simple dictionary structure containing a list of errors and warnings.
[ "Generate", "a", "validation", "report", "from", "a", "notification", "object", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/api.py#L181-L200
opencobra/memote
memote/suite/reporting/config.py
ReportConfiguration.load
def load(cls, filename=None):
    """
    Load a test report configuration.

    Parameters
    ----------
    filename : str, optional
        Path to a custom configuration file; when None the packaged
        default configuration is used.

    """
    if filename is None:
        LOGGER.debug("Loading default configuration.")
        with open_text(templates, "test_config.yml",
                       encoding="utf-8") as file_handle:
            # The configuration is plain data, so use the safe loader:
            # ``yaml.load`` without an explicit Loader is deprecated and
            # can execute arbitrary Python tags.
            content = yaml.safe_load(file_handle)
    else:
        LOGGER.debug("Loading custom configuration '%s'.", filename)
        try:
            with open(filename, encoding="utf-8") as file_handle:
                content = yaml.safe_load(file_handle)
        except IOError as err:
            # A missing or unreadable custom file degrades gracefully to
            # an empty configuration instead of aborting.
            LOGGER.error(
                "Failed to load the custom configuration '%s'. Skipping.",
                filename)
            LOGGER.debug(str(err))
            content = dict()
    return cls(content)
python
def load(cls, filename=None): """Load a test report configuration.""" if filename is None: LOGGER.debug("Loading default configuration.") with open_text(templates, "test_config.yml", encoding="utf-8") as file_handle: content = yaml.load(file_handle) else: LOGGER.debug("Loading custom configuration '%s'.", filename) try: with open(filename, encoding="utf-8") as file_handle: content = yaml.load(file_handle) except IOError as err: LOGGER.error( "Failed to load the custom configuration '%s'. Skipping.", filename) LOGGER.debug(str(err)) content = dict() return cls(content)
[ "def", "load", "(", "cls", ",", "filename", "=", "None", ")", ":", "if", "filename", "is", "None", ":", "LOGGER", ".", "debug", "(", "\"Loading default configuration.\"", ")", "with", "open_text", "(", "templates", ",", "\"test_config.yml\"", ",", "encoding", "=", "\"utf-8\"", ")", "as", "file_handle", ":", "content", "=", "yaml", ".", "load", "(", "file_handle", ")", "else", ":", "LOGGER", ".", "debug", "(", "\"Loading custom configuration '%s'.\"", ",", "filename", ")", "try", ":", "with", "open", "(", "filename", ",", "encoding", "=", "\"utf-8\"", ")", "as", "file_handle", ":", "content", "=", "yaml", ".", "load", "(", "file_handle", ")", "except", "IOError", "as", "err", ":", "LOGGER", ".", "error", "(", "\"Failed to load the custom configuration '%s'. Skipping.\"", ",", "filename", ")", "LOGGER", ".", "debug", "(", "str", "(", "err", ")", ")", "content", "=", "dict", "(", ")", "return", "cls", "(", "content", ")" ]
Load a test report configuration.
[ "Load", "a", "test", "report", "configuration", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/reporting/config.py#L52-L70
opencobra/memote
memote/support/gpr_helpers.py
find_top_level_complex
def find_top_level_complex(gpr):
    """
    Find unique elements of both branches of the top level logical AND.

    Parameters
    ----------
    gpr : str
        The gene-protein-reaction association as a string.

    Returns
    -------
    int
        The size of the symmetric difference between the set of elements to
        the left of the top level logical AND and the right set.

    """
    logger.debug("%r", gpr)
    # Normalize the GPR string into valid Python syntax before parsing:
    # canonical boolean keywords and identifier-safe gene names.
    normalized = logical_and.sub("and", gpr)
    normalized = logical_or.sub("or", normalized)
    normalized = escape_chars.sub("_", normalized)
    tree = ast.parse(normalized)
    # The visitor records the gene sets on either side of the top AND.
    visitor = GPRVisitor()
    visitor.visit(tree)
    return len(visitor.left ^ visitor.right)
python
def find_top_level_complex(gpr): """ Find unique elements of both branches of the top level logical AND. Parameters ---------- gpr : str The gene-protein-reaction association as a string. Returns ------- int The size of the symmetric difference between the set of elements to the left of the top level logical AND and the right set. """ logger.debug("%r", gpr) conform = logical_and.sub("and", gpr) conform = logical_or.sub("or", conform) conform = escape_chars.sub("_", conform) expression = ast.parse(conform) walker = GPRVisitor() walker.visit(expression) return len(walker.left ^ walker.right)
[ "def", "find_top_level_complex", "(", "gpr", ")", ":", "logger", ".", "debug", "(", "\"%r\"", ",", "gpr", ")", "conform", "=", "logical_and", ".", "sub", "(", "\"and\"", ",", "gpr", ")", "conform", "=", "logical_or", ".", "sub", "(", "\"or\"", ",", "conform", ")", "conform", "=", "escape_chars", ".", "sub", "(", "\"_\"", ",", "conform", ")", "expression", "=", "ast", ".", "parse", "(", "conform", ")", "walker", "=", "GPRVisitor", "(", ")", "walker", ".", "visit", "(", "expression", ")", "return", "len", "(", "walker", ".", "left", "^", "walker", ".", "right", ")" ]
Find unique elements of both branches of the top level logical AND. Parameters ---------- gpr : str The gene-protein-reaction association as a string. Returns ------- int The size of the symmetric difference between the set of elements to the left of the top level logical AND and the right set.
[ "Find", "unique", "elements", "of", "both", "branches", "of", "the", "top", "level", "logical", "AND", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/gpr_helpers.py#L107-L130
opencobra/memote
memote/support/gpr_helpers.py
GPRVisitor.visit_BoolOp
def visit_BoolOp(self, node): """Set up recording of elements with this hook.""" if self._is_top and isinstance(node.op, ast.And): self._is_top = False self._current = self.left self.visit(node.values[0]) self._current = self.right for successor in node.values[1:]: self.visit(successor) else: self.generic_visit(node)
python
def visit_BoolOp(self, node): """Set up recording of elements with this hook.""" if self._is_top and isinstance(node.op, ast.And): self._is_top = False self._current = self.left self.visit(node.values[0]) self._current = self.right for successor in node.values[1:]: self.visit(successor) else: self.generic_visit(node)
[ "def", "visit_BoolOp", "(", "self", ",", "node", ")", ":", "if", "self", ".", "_is_top", "and", "isinstance", "(", "node", ".", "op", ",", "ast", ".", "And", ")", ":", "self", ".", "_is_top", "=", "False", "self", ".", "_current", "=", "self", ".", "left", "self", ".", "visit", "(", "node", ".", "values", "[", "0", "]", ")", "self", ".", "_current", "=", "self", ".", "right", "for", "successor", "in", "node", ".", "values", "[", "1", ":", "]", ":", "self", ".", "visit", "(", "successor", ")", "else", ":", "self", ".", "generic_visit", "(", "node", ")" ]
Set up recording of elements with this hook.
[ "Set", "up", "recording", "of", "elements", "with", "this", "hook", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/gpr_helpers.py#L90-L100
opencobra/memote
memote/support/basic.py
find_nonzero_constrained_reactions
def find_nonzero_constrained_reactions(model): """Return list of reactions with non-zero, non-maximal bounds.""" lower_bound, upper_bound = helpers.find_bounds(model) return [rxn for rxn in model.reactions if 0 > rxn.lower_bound > lower_bound or 0 < rxn.upper_bound < upper_bound]
python
def find_nonzero_constrained_reactions(model): """Return list of reactions with non-zero, non-maximal bounds.""" lower_bound, upper_bound = helpers.find_bounds(model) return [rxn for rxn in model.reactions if 0 > rxn.lower_bound > lower_bound or 0 < rxn.upper_bound < upper_bound]
[ "def", "find_nonzero_constrained_reactions", "(", "model", ")", ":", "lower_bound", ",", "upper_bound", "=", "helpers", ".", "find_bounds", "(", "model", ")", "return", "[", "rxn", "for", "rxn", "in", "model", ".", "reactions", "if", "0", ">", "rxn", ".", "lower_bound", ">", "lower_bound", "or", "0", "<", "rxn", ".", "upper_bound", "<", "upper_bound", "]" ]
Return list of reactions with non-zero, non-maximal bounds.
[ "Return", "list", "of", "reactions", "with", "non", "-", "zero", "non", "-", "maximal", "bounds", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/basic.py#L53-L58
opencobra/memote
memote/support/basic.py
find_zero_constrained_reactions
def find_zero_constrained_reactions(model): """Return list of reactions that are constrained to zero flux.""" return [rxn for rxn in model.reactions if rxn.lower_bound == 0 and rxn.upper_bound == 0]
python
def find_zero_constrained_reactions(model): """Return list of reactions that are constrained to zero flux.""" return [rxn for rxn in model.reactions if rxn.lower_bound == 0 and rxn.upper_bound == 0]
[ "def", "find_zero_constrained_reactions", "(", "model", ")", ":", "return", "[", "rxn", "for", "rxn", "in", "model", ".", "reactions", "if", "rxn", ".", "lower_bound", "==", "0", "and", "rxn", ".", "upper_bound", "==", "0", "]" ]
Return list of reactions that are constrained to zero flux.
[ "Return", "list", "of", "reactions", "that", "are", "constrained", "to", "zero", "flux", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/basic.py#L61-L65
opencobra/memote
memote/support/basic.py
find_unconstrained_reactions
def find_unconstrained_reactions(model): """Return list of reactions that are not constrained at all.""" lower_bound, upper_bound = helpers.find_bounds(model) return [rxn for rxn in model.reactions if rxn.lower_bound <= lower_bound and rxn.upper_bound >= upper_bound]
python
def find_unconstrained_reactions(model): """Return list of reactions that are not constrained at all.""" lower_bound, upper_bound = helpers.find_bounds(model) return [rxn for rxn in model.reactions if rxn.lower_bound <= lower_bound and rxn.upper_bound >= upper_bound]
[ "def", "find_unconstrained_reactions", "(", "model", ")", ":", "lower_bound", ",", "upper_bound", "=", "helpers", ".", "find_bounds", "(", "model", ")", "return", "[", "rxn", "for", "rxn", "in", "model", ".", "reactions", "if", "rxn", ".", "lower_bound", "<=", "lower_bound", "and", "rxn", ".", "upper_bound", ">=", "upper_bound", "]" ]
Return list of reactions that are not constrained at all.
[ "Return", "list", "of", "reactions", "that", "are", "not", "constrained", "at", "all", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/basic.py#L73-L78
opencobra/memote
memote/support/basic.py
find_ngam
def find_ngam(model): u""" Return all potential non growth-associated maintenance reactions. From the list of all reactions that convert ATP to ADP select the reactions that match a defined reaction string and whose metabolites are situated within the main model compartment. The main model compartment is the cytosol, and if that cannot be identified, the compartment with the most metabolites. Parameters ---------- model : cobra.Model The metabolic model under investigation. Returns ------- list Reactions that qualify as non-growth associated maintenance reactions. Notes ----- [1]_ define the non-growth associated maintenance (NGAM) as the energy required to maintain all constant processes such as turgor pressure and other housekeeping activities. In metabolic models this is expressed by requiring a simple ATP hydrolysis reaction to always have a fixed minimal amount of flux. This value can be measured as described by [1]_ . References ---------- .. [1] Thiele, I., & Palsson, B. Ø. (2010, January). A protocol for generating a high-quality genome-scale metabolic reconstruction. Nature protocols. Nature Publishing Group. 
http://doi.org/10.1038/nprot.2009.203 """ atp_adp_conv_rxns = helpers.find_converting_reactions( model, ("MNXM3", "MNXM7") ) id_of_main_compartment = helpers.find_compartment_id_in_model(model, 'c') reactants = { helpers.find_met_in_model(model, "MNXM3", id_of_main_compartment)[0], helpers.find_met_in_model(model, "MNXM2", id_of_main_compartment)[0] } products = { helpers.find_met_in_model(model, "MNXM7", id_of_main_compartment)[0], helpers.find_met_in_model(model, "MNXM1", id_of_main_compartment)[0], helpers.find_met_in_model(model, "MNXM9", id_of_main_compartment)[0] } candidates = [rxn for rxn in atp_adp_conv_rxns if rxn.reversibility is False and set(rxn.reactants) == reactants and set(rxn.products) == products] buzzwords = ['maintenance', 'atpm', 'requirement', 'ngam', 'non-growth', 'associated'] refined_candidates = [rxn for rxn in candidates if any( string in filter_none(rxn.name, '').lower() for string in buzzwords )] if refined_candidates: return refined_candidates else: return candidates
python
def find_ngam(model): u""" Return all potential non growth-associated maintenance reactions. From the list of all reactions that convert ATP to ADP select the reactions that match a defined reaction string and whose metabolites are situated within the main model compartment. The main model compartment is the cytosol, and if that cannot be identified, the compartment with the most metabolites. Parameters ---------- model : cobra.Model The metabolic model under investigation. Returns ------- list Reactions that qualify as non-growth associated maintenance reactions. Notes ----- [1]_ define the non-growth associated maintenance (NGAM) as the energy required to maintain all constant processes such as turgor pressure and other housekeeping activities. In metabolic models this is expressed by requiring a simple ATP hydrolysis reaction to always have a fixed minimal amount of flux. This value can be measured as described by [1]_ . References ---------- .. [1] Thiele, I., & Palsson, B. Ø. (2010, January). A protocol for generating a high-quality genome-scale metabolic reconstruction. Nature protocols. Nature Publishing Group. 
http://doi.org/10.1038/nprot.2009.203 """ atp_adp_conv_rxns = helpers.find_converting_reactions( model, ("MNXM3", "MNXM7") ) id_of_main_compartment = helpers.find_compartment_id_in_model(model, 'c') reactants = { helpers.find_met_in_model(model, "MNXM3", id_of_main_compartment)[0], helpers.find_met_in_model(model, "MNXM2", id_of_main_compartment)[0] } products = { helpers.find_met_in_model(model, "MNXM7", id_of_main_compartment)[0], helpers.find_met_in_model(model, "MNXM1", id_of_main_compartment)[0], helpers.find_met_in_model(model, "MNXM9", id_of_main_compartment)[0] } candidates = [rxn for rxn in atp_adp_conv_rxns if rxn.reversibility is False and set(rxn.reactants) == reactants and set(rxn.products) == products] buzzwords = ['maintenance', 'atpm', 'requirement', 'ngam', 'non-growth', 'associated'] refined_candidates = [rxn for rxn in candidates if any( string in filter_none(rxn.name, '').lower() for string in buzzwords )] if refined_candidates: return refined_candidates else: return candidates
[ "def", "find_ngam", "(", "model", ")", ":", "atp_adp_conv_rxns", "=", "helpers", ".", "find_converting_reactions", "(", "model", ",", "(", "\"MNXM3\"", ",", "\"MNXM7\"", ")", ")", "id_of_main_compartment", "=", "helpers", ".", "find_compartment_id_in_model", "(", "model", ",", "'c'", ")", "reactants", "=", "{", "helpers", ".", "find_met_in_model", "(", "model", ",", "\"MNXM3\"", ",", "id_of_main_compartment", ")", "[", "0", "]", ",", "helpers", ".", "find_met_in_model", "(", "model", ",", "\"MNXM2\"", ",", "id_of_main_compartment", ")", "[", "0", "]", "}", "products", "=", "{", "helpers", ".", "find_met_in_model", "(", "model", ",", "\"MNXM7\"", ",", "id_of_main_compartment", ")", "[", "0", "]", ",", "helpers", ".", "find_met_in_model", "(", "model", ",", "\"MNXM1\"", ",", "id_of_main_compartment", ")", "[", "0", "]", ",", "helpers", ".", "find_met_in_model", "(", "model", ",", "\"MNXM9\"", ",", "id_of_main_compartment", ")", "[", "0", "]", "}", "candidates", "=", "[", "rxn", "for", "rxn", "in", "atp_adp_conv_rxns", "if", "rxn", ".", "reversibility", "is", "False", "and", "set", "(", "rxn", ".", "reactants", ")", "==", "reactants", "and", "set", "(", "rxn", ".", "products", ")", "==", "products", "]", "buzzwords", "=", "[", "'maintenance'", ",", "'atpm'", ",", "'requirement'", ",", "'ngam'", ",", "'non-growth'", ",", "'associated'", "]", "refined_candidates", "=", "[", "rxn", "for", "rxn", "in", "candidates", "if", "any", "(", "string", "in", "filter_none", "(", "rxn", ".", "name", ",", "''", ")", ".", "lower", "(", ")", "for", "string", "in", "buzzwords", ")", "]", "if", "refined_candidates", ":", "return", "refined_candidates", "else", ":", "return", "candidates" ]
u""" Return all potential non growth-associated maintenance reactions. From the list of all reactions that convert ATP to ADP select the reactions that match a defined reaction string and whose metabolites are situated within the main model compartment. The main model compartment is the cytosol, and if that cannot be identified, the compartment with the most metabolites. Parameters ---------- model : cobra.Model The metabolic model under investigation. Returns ------- list Reactions that qualify as non-growth associated maintenance reactions. Notes ----- [1]_ define the non-growth associated maintenance (NGAM) as the energy required to maintain all constant processes such as turgor pressure and other housekeeping activities. In metabolic models this is expressed by requiring a simple ATP hydrolysis reaction to always have a fixed minimal amount of flux. This value can be measured as described by [1]_ . References ---------- .. [1] Thiele, I., & Palsson, B. Ø. (2010, January). A protocol for generating a high-quality genome-scale metabolic reconstruction. Nature protocols. Nature Publishing Group. http://doi.org/10.1038/nprot.2009.203
[ "u", "Return", "all", "potential", "non", "growth", "-", "associated", "maintenance", "reactions", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/basic.py#L81-L148
opencobra/memote
memote/support/basic.py
calculate_metabolic_coverage
def calculate_metabolic_coverage(model): u""" Return the ratio of reactions and genes included in the model. Determine whether the amount of reactions and genes in model not equal to zero, then return the ratio. Parameters ---------- model : cobra.Model The metabolic model under investigation. Returns ------- float The ratio of reactions to genes also called metabolic coverage. Raises ------ ValueError If the model does not contain either reactions or genes. Notes ----- According to [1]_ this is a good quality indicator expressing the degree of metabolic coverage i.e. modeling detail of a given reconstruction. The authors explain that models with a 'high level of modeling detail have ratios >1, and [models] with low level of detail have ratios <1'. They explain that 'this difference arises because [models] with basic or intermediate levels of detail often include many reactions in which several gene products and their enzymatic transformations are ‘lumped’'. References ---------- .. [1] Monk, J., Nogales, J., & Palsson, B. O. (2014). Optimizing genome-scale network reconstructions. Nature Biotechnology, 32(5), 447–452. http://doi.org/10.1038/nbt.2870 """ if len(model.reactions) == 0 or len(model.genes) == 0: raise ValueError("The model contains no reactions or genes.") return float(len(model.reactions)) / float(len(model.genes))
python
def calculate_metabolic_coverage(model): u""" Return the ratio of reactions and genes included in the model. Determine whether the amount of reactions and genes in model not equal to zero, then return the ratio. Parameters ---------- model : cobra.Model The metabolic model under investigation. Returns ------- float The ratio of reactions to genes also called metabolic coverage. Raises ------ ValueError If the model does not contain either reactions or genes. Notes ----- According to [1]_ this is a good quality indicator expressing the degree of metabolic coverage i.e. modeling detail of a given reconstruction. The authors explain that models with a 'high level of modeling detail have ratios >1, and [models] with low level of detail have ratios <1'. They explain that 'this difference arises because [models] with basic or intermediate levels of detail often include many reactions in which several gene products and their enzymatic transformations are ‘lumped’'. References ---------- .. [1] Monk, J., Nogales, J., & Palsson, B. O. (2014). Optimizing genome-scale network reconstructions. Nature Biotechnology, 32(5), 447–452. http://doi.org/10.1038/nbt.2870 """ if len(model.reactions) == 0 or len(model.genes) == 0: raise ValueError("The model contains no reactions or genes.") return float(len(model.reactions)) / float(len(model.genes))
[ "def", "calculate_metabolic_coverage", "(", "model", ")", ":", "if", "len", "(", "model", ".", "reactions", ")", "==", "0", "or", "len", "(", "model", ".", "genes", ")", "==", "0", ":", "raise", "ValueError", "(", "\"The model contains no reactions or genes.\"", ")", "return", "float", "(", "len", "(", "model", ".", "reactions", ")", ")", "/", "float", "(", "len", "(", "model", ".", "genes", ")", ")" ]
u""" Return the ratio of reactions and genes included in the model. Determine whether the amount of reactions and genes in model not equal to zero, then return the ratio. Parameters ---------- model : cobra.Model The metabolic model under investigation. Returns ------- float The ratio of reactions to genes also called metabolic coverage. Raises ------ ValueError If the model does not contain either reactions or genes. Notes ----- According to [1]_ this is a good quality indicator expressing the degree of metabolic coverage i.e. modeling detail of a given reconstruction. The authors explain that models with a 'high level of modeling detail have ratios >1, and [models] with low level of detail have ratios <1'. They explain that 'this difference arises because [models] with basic or intermediate levels of detail often include many reactions in which several gene products and their enzymatic transformations are ‘lumped’'. References ---------- .. [1] Monk, J., Nogales, J., & Palsson, B. O. (2014). Optimizing genome-scale network reconstructions. Nature Biotechnology, 32(5), 447–452. http://doi.org/10.1038/nbt.2870
[ "u", "Return", "the", "ratio", "of", "reactions", "and", "genes", "included", "in", "the", "model", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/basic.py#L151-L192
opencobra/memote
memote/support/basic.py
find_protein_complexes
def find_protein_complexes(model): """ Find reactions that are catalyzed by at least a heterodimer. Parameters ---------- model : cobra.Model The metabolic model under investigation. Returns ------- list Reactions whose gene-protein-reaction association contains at least one logical AND combining different gene products (heterodimer). """ complexes = [] for rxn in model.reactions: if not rxn.gene_reaction_rule: continue size = find_top_level_complex(rxn.gene_reaction_rule) if size >= 2: complexes.append(rxn) return complexes
python
def find_protein_complexes(model): """ Find reactions that are catalyzed by at least a heterodimer. Parameters ---------- model : cobra.Model The metabolic model under investigation. Returns ------- list Reactions whose gene-protein-reaction association contains at least one logical AND combining different gene products (heterodimer). """ complexes = [] for rxn in model.reactions: if not rxn.gene_reaction_rule: continue size = find_top_level_complex(rxn.gene_reaction_rule) if size >= 2: complexes.append(rxn) return complexes
[ "def", "find_protein_complexes", "(", "model", ")", ":", "complexes", "=", "[", "]", "for", "rxn", "in", "model", ".", "reactions", ":", "if", "not", "rxn", ".", "gene_reaction_rule", ":", "continue", "size", "=", "find_top_level_complex", "(", "rxn", ".", "gene_reaction_rule", ")", "if", "size", ">=", "2", ":", "complexes", ".", "append", "(", "rxn", ")", "return", "complexes" ]
Find reactions that are catalyzed by at least a heterodimer. Parameters ---------- model : cobra.Model The metabolic model under investigation. Returns ------- list Reactions whose gene-protein-reaction association contains at least one logical AND combining different gene products (heterodimer).
[ "Find", "reactions", "that", "are", "catalyzed", "by", "at", "least", "a", "heterodimer", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/basic.py#L195-L218
opencobra/memote
memote/support/basic.py
is_constrained_reaction
def is_constrained_reaction(model, rxn): """Return whether a reaction has fixed constraints.""" lower_bound, upper_bound = helpers.find_bounds(model) if rxn.reversibility: return rxn.lower_bound > lower_bound or rxn.upper_bound < upper_bound else: return rxn.lower_bound > 0 or rxn.upper_bound < upper_bound
python
def is_constrained_reaction(model, rxn): """Return whether a reaction has fixed constraints.""" lower_bound, upper_bound = helpers.find_bounds(model) if rxn.reversibility: return rxn.lower_bound > lower_bound or rxn.upper_bound < upper_bound else: return rxn.lower_bound > 0 or rxn.upper_bound < upper_bound
[ "def", "is_constrained_reaction", "(", "model", ",", "rxn", ")", ":", "lower_bound", ",", "upper_bound", "=", "helpers", ".", "find_bounds", "(", "model", ")", "if", "rxn", ".", "reversibility", ":", "return", "rxn", ".", "lower_bound", ">", "lower_bound", "or", "rxn", ".", "upper_bound", "<", "upper_bound", "else", ":", "return", "rxn", ".", "lower_bound", ">", "0", "or", "rxn", ".", "upper_bound", "<", "upper_bound" ]
Return whether a reaction has fixed constraints.
[ "Return", "whether", "a", "reaction", "has", "fixed", "constraints", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/basic.py#L236-L242
opencobra/memote
memote/support/basic.py
find_oxygen_reactions
def find_oxygen_reactions(model): """Return list of oxygen-producing/-consuming reactions.""" o2_in_model = helpers.find_met_in_model(model, "MNXM4") return set([rxn for met in model.metabolites for rxn in met.reactions if met.formula == "O2" or met in o2_in_model])
python
def find_oxygen_reactions(model): """Return list of oxygen-producing/-consuming reactions.""" o2_in_model = helpers.find_met_in_model(model, "MNXM4") return set([rxn for met in model.metabolites for rxn in met.reactions if met.formula == "O2" or met in o2_in_model])
[ "def", "find_oxygen_reactions", "(", "model", ")", ":", "o2_in_model", "=", "helpers", ".", "find_met_in_model", "(", "model", ",", "\"MNXM4\"", ")", "return", "set", "(", "[", "rxn", "for", "met", "in", "model", ".", "metabolites", "for", "rxn", "in", "met", ".", "reactions", "if", "met", ".", "formula", "==", "\"O2\"", "or", "met", "in", "o2_in_model", "]", ")" ]
Return list of oxygen-producing/-consuming reactions.
[ "Return", "list", "of", "oxygen", "-", "producing", "/", "-", "consuming", "reactions", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/basic.py#L245-L250
opencobra/memote
memote/support/basic.py
find_unique_metabolites
def find_unique_metabolites(model): """Return set of metabolite IDs without duplicates from compartments.""" unique = set() for met in model.metabolites: is_missing = True for comp in model.compartments: if met.id.endswith("_{}".format(comp)): unique.add(met.id[:-(len(comp) + 1)]) is_missing = False break if is_missing: unique.add(met.id) return unique
python
def find_unique_metabolites(model): """Return set of metabolite IDs without duplicates from compartments.""" unique = set() for met in model.metabolites: is_missing = True for comp in model.compartments: if met.id.endswith("_{}".format(comp)): unique.add(met.id[:-(len(comp) + 1)]) is_missing = False break if is_missing: unique.add(met.id) return unique
[ "def", "find_unique_metabolites", "(", "model", ")", ":", "unique", "=", "set", "(", ")", "for", "met", "in", "model", ".", "metabolites", ":", "is_missing", "=", "True", "for", "comp", "in", "model", ".", "compartments", ":", "if", "met", ".", "id", ".", "endswith", "(", "\"_{}\"", ".", "format", "(", "comp", ")", ")", ":", "unique", ".", "add", "(", "met", ".", "id", "[", ":", "-", "(", "len", "(", "comp", ")", "+", "1", ")", "]", ")", "is_missing", "=", "False", "break", "if", "is_missing", ":", "unique", ".", "add", "(", "met", ".", "id", ")", "return", "unique" ]
Return set of metabolite IDs without duplicates from compartments.
[ "Return", "set", "of", "metabolite", "IDs", "without", "duplicates", "from", "compartments", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/basic.py#L253-L265
opencobra/memote
memote/support/basic.py
find_duplicate_metabolites_in_compartments
def find_duplicate_metabolites_in_compartments(model): """ Return list of metabolites with duplicates in the same compartment. This function identifies duplicate metabolites in each compartment by determining if any two metabolites have identical InChI-key annotations. For instance, this function would find compounds with IDs ATP1 and ATP2 in the cytosolic compartment, with both having the same InChI annotations. Parameters ---------- model : cobra.Model The metabolic model under investigation. Returns ------- list A list of tuples of duplicate metabolites. """ unique_identifiers = ["inchikey", "inchi"] duplicates = [] for met_1, met_2 in combinations(model.metabolites, 2): if met_1.compartment == met_2.compartment: for key in unique_identifiers: if key in met_1.annotation and key in met_2.annotation: if met_1.annotation[key] == met_2.annotation[key]: duplicates.append((met_1.id, met_2.id)) break return duplicates
python
def find_duplicate_metabolites_in_compartments(model): """ Return list of metabolites with duplicates in the same compartment. This function identifies duplicate metabolites in each compartment by determining if any two metabolites have identical InChI-key annotations. For instance, this function would find compounds with IDs ATP1 and ATP2 in the cytosolic compartment, with both having the same InChI annotations. Parameters ---------- model : cobra.Model The metabolic model under investigation. Returns ------- list A list of tuples of duplicate metabolites. """ unique_identifiers = ["inchikey", "inchi"] duplicates = [] for met_1, met_2 in combinations(model.metabolites, 2): if met_1.compartment == met_2.compartment: for key in unique_identifiers: if key in met_1.annotation and key in met_2.annotation: if met_1.annotation[key] == met_2.annotation[key]: duplicates.append((met_1.id, met_2.id)) break return duplicates
[ "def", "find_duplicate_metabolites_in_compartments", "(", "model", ")", ":", "unique_identifiers", "=", "[", "\"inchikey\"", ",", "\"inchi\"", "]", "duplicates", "=", "[", "]", "for", "met_1", ",", "met_2", "in", "combinations", "(", "model", ".", "metabolites", ",", "2", ")", ":", "if", "met_1", ".", "compartment", "==", "met_2", ".", "compartment", ":", "for", "key", "in", "unique_identifiers", ":", "if", "key", "in", "met_1", ".", "annotation", "and", "key", "in", "met_2", ".", "annotation", ":", "if", "met_1", ".", "annotation", "[", "key", "]", "==", "met_2", ".", "annotation", "[", "key", "]", ":", "duplicates", ".", "append", "(", "(", "met_1", ".", "id", ",", "met_2", ".", "id", ")", ")", "break", "return", "duplicates" ]
Return list of metabolites with duplicates in the same compartment. This function identifies duplicate metabolites in each compartment by determining if any two metabolites have identical InChI-key annotations. For instance, this function would find compounds with IDs ATP1 and ATP2 in the cytosolic compartment, with both having the same InChI annotations. Parameters ---------- model : cobra.Model The metabolic model under investigation. Returns ------- list A list of tuples of duplicate metabolites.
[ "Return", "list", "of", "metabolites", "with", "duplicates", "in", "the", "same", "compartment", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/basic.py#L269-L298
opencobra/memote
memote/support/basic.py
find_reactions_with_partially_identical_annotations
def find_reactions_with_partially_identical_annotations(model): """ Return duplicate reactions based on identical annotation. Identify duplicate reactions globally by checking if any two metabolic reactions have the same entries in their annotation attributes. This can be useful to identify one 'type' of reactions that occurs in several compartments, to curate merged models or to clean-up bulk model modifications. The heuristic looks at annotations with the keys "metanetx.reaction", "kegg.reaction", "brenda", "rhea", "biocyc", "bigg.reaction" only. Parameters ---------- model : cobra.Model The metabolic model under investigation. Returns ------- dict A mapping from sets of annotations to groups of reactions with those annotations. int The total number of unique reactions that are duplicated. """ duplicates = {} rxn_db_identifiers = ["metanetx.reaction", "kegg.reaction", "brenda", "rhea", "biocyc", "bigg.reaction"] # Build a list that associates a reaction with a set of its annotations. ann_rxns = [] for rxn in model.reactions: ann = [] for key in rxn_db_identifiers: if key in rxn.annotation: if isinstance(rxn.annotation[key], list): ann.extend([(key, elem) for elem in rxn.annotation[key]]) else: ann.append((key, rxn.annotation[key])) ann_rxns.append((rxn, frozenset(ann))) # Compute the intersection between annotations and record the matching # reaction identifiers. for (rxn_a, ann_a), (rxn_b, ann_b) in combinations(ann_rxns, 2): mutual_pair = tuple(ann_a & ann_b) if len(mutual_pair) > 0: duplicates.setdefault(mutual_pair, set()).update( [rxn_a.id, rxn_b.id]) # Transform the object for JSON compatibility num_duplicated = set() duplicated = {} for key in duplicates: # Object keys must be strings in JSON. new_key = ",".join(sorted("{}:{}".format(ns, term) for ns, term in key)) duplicated[new_key] = rxns = list(duplicates[key]) num_duplicated.update(rxns) return duplicated, len(num_duplicated)
python
def find_reactions_with_partially_identical_annotations(model): """ Return duplicate reactions based on identical annotation. Identify duplicate reactions globally by checking if any two metabolic reactions have the same entries in their annotation attributes. This can be useful to identify one 'type' of reactions that occurs in several compartments, to curate merged models or to clean-up bulk model modifications. The heuristic looks at annotations with the keys "metanetx.reaction", "kegg.reaction", "brenda", "rhea", "biocyc", "bigg.reaction" only. Parameters ---------- model : cobra.Model The metabolic model under investigation. Returns ------- dict A mapping from sets of annotations to groups of reactions with those annotations. int The total number of unique reactions that are duplicated. """ duplicates = {} rxn_db_identifiers = ["metanetx.reaction", "kegg.reaction", "brenda", "rhea", "biocyc", "bigg.reaction"] # Build a list that associates a reaction with a set of its annotations. ann_rxns = [] for rxn in model.reactions: ann = [] for key in rxn_db_identifiers: if key in rxn.annotation: if isinstance(rxn.annotation[key], list): ann.extend([(key, elem) for elem in rxn.annotation[key]]) else: ann.append((key, rxn.annotation[key])) ann_rxns.append((rxn, frozenset(ann))) # Compute the intersection between annotations and record the matching # reaction identifiers. for (rxn_a, ann_a), (rxn_b, ann_b) in combinations(ann_rxns, 2): mutual_pair = tuple(ann_a & ann_b) if len(mutual_pair) > 0: duplicates.setdefault(mutual_pair, set()).update( [rxn_a.id, rxn_b.id]) # Transform the object for JSON compatibility num_duplicated = set() duplicated = {} for key in duplicates: # Object keys must be strings in JSON. new_key = ",".join(sorted("{}:{}".format(ns, term) for ns, term in key)) duplicated[new_key] = rxns = list(duplicates[key]) num_duplicated.update(rxns) return duplicated, len(num_duplicated)
[ "def", "find_reactions_with_partially_identical_annotations", "(", "model", ")", ":", "duplicates", "=", "{", "}", "rxn_db_identifiers", "=", "[", "\"metanetx.reaction\"", ",", "\"kegg.reaction\"", ",", "\"brenda\"", ",", "\"rhea\"", ",", "\"biocyc\"", ",", "\"bigg.reaction\"", "]", "# Build a list that associates a reaction with a set of its annotations.", "ann_rxns", "=", "[", "]", "for", "rxn", "in", "model", ".", "reactions", ":", "ann", "=", "[", "]", "for", "key", "in", "rxn_db_identifiers", ":", "if", "key", "in", "rxn", ".", "annotation", ":", "if", "isinstance", "(", "rxn", ".", "annotation", "[", "key", "]", ",", "list", ")", ":", "ann", ".", "extend", "(", "[", "(", "key", ",", "elem", ")", "for", "elem", "in", "rxn", ".", "annotation", "[", "key", "]", "]", ")", "else", ":", "ann", ".", "append", "(", "(", "key", ",", "rxn", ".", "annotation", "[", "key", "]", ")", ")", "ann_rxns", ".", "append", "(", "(", "rxn", ",", "frozenset", "(", "ann", ")", ")", ")", "# Compute the intersection between annotations and record the matching", "# reaction identifiers.", "for", "(", "rxn_a", ",", "ann_a", ")", ",", "(", "rxn_b", ",", "ann_b", ")", "in", "combinations", "(", "ann_rxns", ",", "2", ")", ":", "mutual_pair", "=", "tuple", "(", "ann_a", "&", "ann_b", ")", "if", "len", "(", "mutual_pair", ")", ">", "0", ":", "duplicates", ".", "setdefault", "(", "mutual_pair", ",", "set", "(", ")", ")", ".", "update", "(", "[", "rxn_a", ".", "id", ",", "rxn_b", ".", "id", "]", ")", "# Transform the object for JSON compatibility", "num_duplicated", "=", "set", "(", ")", "duplicated", "=", "{", "}", "for", "key", "in", "duplicates", ":", "# Object keys must be strings in JSON.", "new_key", "=", "\",\"", ".", "join", "(", "sorted", "(", "\"{}:{}\"", ".", "format", "(", "ns", ",", "term", ")", "for", "ns", ",", "term", "in", "key", ")", ")", "duplicated", "[", "new_key", "]", "=", "rxns", "=", "list", "(", "duplicates", "[", "key", "]", ")", "num_duplicated", ".", "update", 
"(", "rxns", ")", "return", "duplicated", ",", "len", "(", "num_duplicated", ")" ]
Return duplicate reactions based on identical annotation. Identify duplicate reactions globally by checking if any two metabolic reactions have the same entries in their annotation attributes. This can be useful to identify one 'type' of reactions that occurs in several compartments, to curate merged models or to clean-up bulk model modifications. The heuristic looks at annotations with the keys "metanetx.reaction", "kegg.reaction", "brenda", "rhea", "biocyc", "bigg.reaction" only. Parameters ---------- model : cobra.Model The metabolic model under investigation. Returns ------- dict A mapping from sets of annotations to groups of reactions with those annotations. int The total number of unique reactions that are duplicated.
[ "Return", "duplicate", "reactions", "based", "on", "identical", "annotation", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/basic.py#L301-L356
opencobra/memote
memote/support/basic.py
map_metabolites_to_structures
def map_metabolites_to_structures(metabolites, compartments): """ Map metabolites from the identifier namespace to structural space. Metabolites who lack structural annotation (InChI or InChIKey) are ignored. Parameters ---------- metabolites : iterable The cobra.Metabolites to map. compartments : iterable The different compartments to consider. Structures are treated separately for each compartment. Returns ------- dict A mapping from a cobra.Metabolite to its compartment specific structure index. """ # TODO (Moritz Beber): Consider SMILES? unique_identifiers = ["inchikey", "inchi"] met2mol = {} molecules = {c: [] for c in compartments} for met in metabolites: ann = [] for key in unique_identifiers: mol = met.annotation.get(key) if mol is not None: ann.append(mol) # Ignore metabolites without the required information. if len(ann) == 0: continue ann = set(ann) # Compare with other structures in the same compartment. mols = molecules[met.compartment] for i, mol_group in enumerate(mols): if len(ann & mol_group) > 0: mol_group.update(ann) # We map to the index of the group because it is hashable and # cheaper to compare later. met2mol[met] = "{}-{}".format(met.compartment, i) break if met not in met2mol: # The length of the list corresponds to the 0-index after appending. met2mol[met] = "{}-{}".format(met.compartment, len(mols)) mols.append(ann) return met2mol
python
def map_metabolites_to_structures(metabolites, compartments): """ Map metabolites from the identifier namespace to structural space. Metabolites who lack structural annotation (InChI or InChIKey) are ignored. Parameters ---------- metabolites : iterable The cobra.Metabolites to map. compartments : iterable The different compartments to consider. Structures are treated separately for each compartment. Returns ------- dict A mapping from a cobra.Metabolite to its compartment specific structure index. """ # TODO (Moritz Beber): Consider SMILES? unique_identifiers = ["inchikey", "inchi"] met2mol = {} molecules = {c: [] for c in compartments} for met in metabolites: ann = [] for key in unique_identifiers: mol = met.annotation.get(key) if mol is not None: ann.append(mol) # Ignore metabolites without the required information. if len(ann) == 0: continue ann = set(ann) # Compare with other structures in the same compartment. mols = molecules[met.compartment] for i, mol_group in enumerate(mols): if len(ann & mol_group) > 0: mol_group.update(ann) # We map to the index of the group because it is hashable and # cheaper to compare later. met2mol[met] = "{}-{}".format(met.compartment, i) break if met not in met2mol: # The length of the list corresponds to the 0-index after appending. met2mol[met] = "{}-{}".format(met.compartment, len(mols)) mols.append(ann) return met2mol
[ "def", "map_metabolites_to_structures", "(", "metabolites", ",", "compartments", ")", ":", "# TODO (Moritz Beber): Consider SMILES?", "unique_identifiers", "=", "[", "\"inchikey\"", ",", "\"inchi\"", "]", "met2mol", "=", "{", "}", "molecules", "=", "{", "c", ":", "[", "]", "for", "c", "in", "compartments", "}", "for", "met", "in", "metabolites", ":", "ann", "=", "[", "]", "for", "key", "in", "unique_identifiers", ":", "mol", "=", "met", ".", "annotation", ".", "get", "(", "key", ")", "if", "mol", "is", "not", "None", ":", "ann", ".", "append", "(", "mol", ")", "# Ignore metabolites without the required information.", "if", "len", "(", "ann", ")", "==", "0", ":", "continue", "ann", "=", "set", "(", "ann", ")", "# Compare with other structures in the same compartment.", "mols", "=", "molecules", "[", "met", ".", "compartment", "]", "for", "i", ",", "mol_group", "in", "enumerate", "(", "mols", ")", ":", "if", "len", "(", "ann", "&", "mol_group", ")", ">", "0", ":", "mol_group", ".", "update", "(", "ann", ")", "# We map to the index of the group because it is hashable and", "# cheaper to compare later.", "met2mol", "[", "met", "]", "=", "\"{}-{}\"", ".", "format", "(", "met", ".", "compartment", ",", "i", ")", "break", "if", "met", "not", "in", "met2mol", ":", "# The length of the list corresponds to the 0-index after appending.", "met2mol", "[", "met", "]", "=", "\"{}-{}\"", ".", "format", "(", "met", ".", "compartment", ",", "len", "(", "mols", ")", ")", "mols", ".", "append", "(", "ann", ")", "return", "met2mol" ]
Map metabolites from the identifier namespace to structural space. Metabolites who lack structural annotation (InChI or InChIKey) are ignored. Parameters ---------- metabolites : iterable The cobra.Metabolites to map. compartments : iterable The different compartments to consider. Structures are treated separately for each compartment. Returns ------- dict A mapping from a cobra.Metabolite to its compartment specific structure index.
[ "Map", "metabolites", "from", "the", "identifier", "namespace", "to", "structural", "space", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/basic.py#L359-L407
opencobra/memote
memote/support/basic.py
find_duplicate_reactions
def find_duplicate_reactions(model): """ Return a list with pairs of reactions that are functionally identical. Identify duplicate reactions globally by checking if any two reactions have the same metabolites, same directionality and are in the same compartment. This can be useful to curate merged models or to clean-up bulk model modifications. The heuristic compares reactions in a pairwise manner. For each reaction, the metabolite annotations are checked for a description of the structure (via InChI and InChIKey).If they exist, substrates and products as well as the stoichiometries of any reaction pair are compared. Only reactions where the substrates, products, stoichiometry and reversibility are identical are considered to be duplicates. This test will not be able to identify duplicate reactions if there are no structure annotations. Further, it will report reactions with differing bounds as equal if they otherwise match the above conditions. Parameters ---------- model : cobra.Model The metabolic model under investigation. Returns ------- list A list of pairs of duplicate reactions based on metabolites. int The number of unique reactions that have a duplicates """ met2mol = map_metabolites_to_structures(model.metabolites, model.compartments) # Build a list associating reactions with their stoichiometry in molecular # structure space. structural = [] for rxn in model.reactions: # Ignore reactions that have metabolites without structures. if not all(met in met2mol for met in rxn.metabolites): continue # We consider substrates and products separately since, for example, # the InChI for H2O and OH is the same. substrates = { met2mol[met]: rxn.get_coefficient(met) for met in rxn.reactants } products = { met2mol[met]: rxn.get_coefficient(met) for met in rxn.products } structural.append((rxn, substrates, products)) # Compare reactions using their structure-based stoichiometries. 
num_duplicated = set() duplicates = [] for (rxn_a, sub_a, prod_a), (rxn_b, sub_b, prod_b) in combinations( structural, 2): # Compare the substrates. if sub_a != sub_b: continue # Compare the products. if prod_a != prod_b: continue # Compare whether they are both (ir-)reversible. if rxn_a.reversibility != rxn_b.reversibility: continue # TODO (Moritz Beber): We could compare bounds here but it might be # worth knowing about the reactions even if their bounds differ? duplicates.append((rxn_a.id, rxn_b.id)) num_duplicated.add(rxn_a.id) num_duplicated.add(rxn_b.id) return duplicates, len(num_duplicated)
python
def find_duplicate_reactions(model): """ Return a list with pairs of reactions that are functionally identical. Identify duplicate reactions globally by checking if any two reactions have the same metabolites, same directionality and are in the same compartment. This can be useful to curate merged models or to clean-up bulk model modifications. The heuristic compares reactions in a pairwise manner. For each reaction, the metabolite annotations are checked for a description of the structure (via InChI and InChIKey).If they exist, substrates and products as well as the stoichiometries of any reaction pair are compared. Only reactions where the substrates, products, stoichiometry and reversibility are identical are considered to be duplicates. This test will not be able to identify duplicate reactions if there are no structure annotations. Further, it will report reactions with differing bounds as equal if they otherwise match the above conditions. Parameters ---------- model : cobra.Model The metabolic model under investigation. Returns ------- list A list of pairs of duplicate reactions based on metabolites. int The number of unique reactions that have a duplicates """ met2mol = map_metabolites_to_structures(model.metabolites, model.compartments) # Build a list associating reactions with their stoichiometry in molecular # structure space. structural = [] for rxn in model.reactions: # Ignore reactions that have metabolites without structures. if not all(met in met2mol for met in rxn.metabolites): continue # We consider substrates and products separately since, for example, # the InChI for H2O and OH is the same. substrates = { met2mol[met]: rxn.get_coefficient(met) for met in rxn.reactants } products = { met2mol[met]: rxn.get_coefficient(met) for met in rxn.products } structural.append((rxn, substrates, products)) # Compare reactions using their structure-based stoichiometries. 
num_duplicated = set() duplicates = [] for (rxn_a, sub_a, prod_a), (rxn_b, sub_b, prod_b) in combinations( structural, 2): # Compare the substrates. if sub_a != sub_b: continue # Compare the products. if prod_a != prod_b: continue # Compare whether they are both (ir-)reversible. if rxn_a.reversibility != rxn_b.reversibility: continue # TODO (Moritz Beber): We could compare bounds here but it might be # worth knowing about the reactions even if their bounds differ? duplicates.append((rxn_a.id, rxn_b.id)) num_duplicated.add(rxn_a.id) num_duplicated.add(rxn_b.id) return duplicates, len(num_duplicated)
[ "def", "find_duplicate_reactions", "(", "model", ")", ":", "met2mol", "=", "map_metabolites_to_structures", "(", "model", ".", "metabolites", ",", "model", ".", "compartments", ")", "# Build a list associating reactions with their stoichiometry in molecular", "# structure space.", "structural", "=", "[", "]", "for", "rxn", "in", "model", ".", "reactions", ":", "# Ignore reactions that have metabolites without structures.", "if", "not", "all", "(", "met", "in", "met2mol", "for", "met", "in", "rxn", ".", "metabolites", ")", ":", "continue", "# We consider substrates and products separately since, for example,", "# the InChI for H2O and OH is the same.", "substrates", "=", "{", "met2mol", "[", "met", "]", ":", "rxn", ".", "get_coefficient", "(", "met", ")", "for", "met", "in", "rxn", ".", "reactants", "}", "products", "=", "{", "met2mol", "[", "met", "]", ":", "rxn", ".", "get_coefficient", "(", "met", ")", "for", "met", "in", "rxn", ".", "products", "}", "structural", ".", "append", "(", "(", "rxn", ",", "substrates", ",", "products", ")", ")", "# Compare reactions using their structure-based stoichiometries.", "num_duplicated", "=", "set", "(", ")", "duplicates", "=", "[", "]", "for", "(", "rxn_a", ",", "sub_a", ",", "prod_a", ")", ",", "(", "rxn_b", ",", "sub_b", ",", "prod_b", ")", "in", "combinations", "(", "structural", ",", "2", ")", ":", "# Compare the substrates.", "if", "sub_a", "!=", "sub_b", ":", "continue", "# Compare the products.", "if", "prod_a", "!=", "prod_b", ":", "continue", "# Compare whether they are both (ir-)reversible.", "if", "rxn_a", ".", "reversibility", "!=", "rxn_b", ".", "reversibility", ":", "continue", "# TODO (Moritz Beber): We could compare bounds here but it might be", "# worth knowing about the reactions even if their bounds differ?", "duplicates", ".", "append", "(", "(", "rxn_a", ".", "id", ",", "rxn_b", ".", "id", ")", ")", "num_duplicated", ".", "add", "(", "rxn_a", ".", "id", ")", "num_duplicated", ".", "add", "(", "rxn_b", 
".", "id", ")", "return", "duplicates", ",", "len", "(", "num_duplicated", ")" ]
Return a list with pairs of reactions that are functionally identical. Identify duplicate reactions globally by checking if any two reactions have the same metabolites, same directionality and are in the same compartment. This can be useful to curate merged models or to clean-up bulk model modifications. The heuristic compares reactions in a pairwise manner. For each reaction, the metabolite annotations are checked for a description of the structure (via InChI and InChIKey).If they exist, substrates and products as well as the stoichiometries of any reaction pair are compared. Only reactions where the substrates, products, stoichiometry and reversibility are identical are considered to be duplicates. This test will not be able to identify duplicate reactions if there are no structure annotations. Further, it will report reactions with differing bounds as equal if they otherwise match the above conditions. Parameters ---------- model : cobra.Model The metabolic model under investigation. Returns ------- list A list of pairs of duplicate reactions based on metabolites. int The number of unique reactions that have a duplicates
[ "Return", "a", "list", "with", "pairs", "of", "reactions", "that", "are", "functionally", "identical", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/basic.py#L410-L479
opencobra/memote
memote/support/basic.py
find_reactions_with_identical_genes
def find_reactions_with_identical_genes(model): """ Return reactions that have identical genes. Identify duplicate reactions globally by checking if any two reactions have the same genes. This can be useful to curate merged models or to clean-up bulk model modifications, but also to identify promiscuous enzymes. The heuristic compares reactions in a pairwise manner and reports on reaction pairs whose genes are identical. Reactions with missing genes are skipped. Parameters ---------- model : cobra.Model The metabolic model under investigation. Returns ------- dict A mapping from sets of genes to all the reactions containing those genes. int The total number of unique reactions that appear duplicates based on their gene-protein-reaction associations. """ duplicates = dict() for rxn_a, rxn_b in combinations(model.reactions, 2): if not (rxn_a.genes and rxn_b.genes): continue if rxn_a.genes == rxn_b.genes: # This works because the `genes` are frozen sets. identifiers = rxn_a.genes duplicates.setdefault(identifiers, set()).update( [rxn_a.id, rxn_b.id]) # Transform the object for JSON compatibility num_duplicated = set() duplicated = {} for key in duplicates: # Object keys must be strings in JSON. new_key = ",".join(sorted(g.id for g in key)) duplicated[new_key] = rxns = list(duplicates[key]) num_duplicated.update(rxns) return duplicated, len(num_duplicated)
python
def find_reactions_with_identical_genes(model): """ Return reactions that have identical genes. Identify duplicate reactions globally by checking if any two reactions have the same genes. This can be useful to curate merged models or to clean-up bulk model modifications, but also to identify promiscuous enzymes. The heuristic compares reactions in a pairwise manner and reports on reaction pairs whose genes are identical. Reactions with missing genes are skipped. Parameters ---------- model : cobra.Model The metabolic model under investigation. Returns ------- dict A mapping from sets of genes to all the reactions containing those genes. int The total number of unique reactions that appear duplicates based on their gene-protein-reaction associations. """ duplicates = dict() for rxn_a, rxn_b in combinations(model.reactions, 2): if not (rxn_a.genes and rxn_b.genes): continue if rxn_a.genes == rxn_b.genes: # This works because the `genes` are frozen sets. identifiers = rxn_a.genes duplicates.setdefault(identifiers, set()).update( [rxn_a.id, rxn_b.id]) # Transform the object for JSON compatibility num_duplicated = set() duplicated = {} for key in duplicates: # Object keys must be strings in JSON. new_key = ",".join(sorted(g.id for g in key)) duplicated[new_key] = rxns = list(duplicates[key]) num_duplicated.update(rxns) return duplicated, len(num_duplicated)
[ "def", "find_reactions_with_identical_genes", "(", "model", ")", ":", "duplicates", "=", "dict", "(", ")", "for", "rxn_a", ",", "rxn_b", "in", "combinations", "(", "model", ".", "reactions", ",", "2", ")", ":", "if", "not", "(", "rxn_a", ".", "genes", "and", "rxn_b", ".", "genes", ")", ":", "continue", "if", "rxn_a", ".", "genes", "==", "rxn_b", ".", "genes", ":", "# This works because the `genes` are frozen sets.", "identifiers", "=", "rxn_a", ".", "genes", "duplicates", ".", "setdefault", "(", "identifiers", ",", "set", "(", ")", ")", ".", "update", "(", "[", "rxn_a", ".", "id", ",", "rxn_b", ".", "id", "]", ")", "# Transform the object for JSON compatibility", "num_duplicated", "=", "set", "(", ")", "duplicated", "=", "{", "}", "for", "key", "in", "duplicates", ":", "# Object keys must be strings in JSON.", "new_key", "=", "\",\"", ".", "join", "(", "sorted", "(", "g", ".", "id", "for", "g", "in", "key", ")", ")", "duplicated", "[", "new_key", "]", "=", "rxns", "=", "list", "(", "duplicates", "[", "key", "]", ")", "num_duplicated", ".", "update", "(", "rxns", ")", "return", "duplicated", ",", "len", "(", "num_duplicated", ")" ]
Return reactions that have identical genes. Identify duplicate reactions globally by checking if any two reactions have the same genes. This can be useful to curate merged models or to clean-up bulk model modifications, but also to identify promiscuous enzymes. The heuristic compares reactions in a pairwise manner and reports on reaction pairs whose genes are identical. Reactions with missing genes are skipped. Parameters ---------- model : cobra.Model The metabolic model under investigation. Returns ------- dict A mapping from sets of genes to all the reactions containing those genes. int The total number of unique reactions that appear duplicates based on their gene-protein-reaction associations.
[ "Return", "reactions", "that", "have", "identical", "genes", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/basic.py#L482-L526
opencobra/memote
memote/support/basic.py
find_medium_metabolites
def find_medium_metabolites(model): """Return the list of metabolites ingested/excreted by the model.""" return [met.id for rxn in model.medium for met in model.reactions.get_by_id(rxn).metabolites]
python
def find_medium_metabolites(model): """Return the list of metabolites ingested/excreted by the model.""" return [met.id for rxn in model.medium for met in model.reactions.get_by_id(rxn).metabolites]
[ "def", "find_medium_metabolites", "(", "model", ")", ":", "return", "[", "met", ".", "id", "for", "rxn", "in", "model", ".", "medium", "for", "met", "in", "model", ".", "reactions", ".", "get_by_id", "(", "rxn", ")", ".", "metabolites", "]" ]
Return the list of metabolites ingested/excreted by the model.
[ "Return", "the", "list", "of", "metabolites", "ingested", "/", "excreted", "by", "the", "model", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/basic.py#L535-L538
opencobra/memote
memote/support/basic.py
find_external_metabolites
def find_external_metabolites(model): """Return all metabolites in the external compartment.""" ex_comp = find_external_compartment(model) return [met for met in model.metabolites if met.compartment == ex_comp]
python
def find_external_metabolites(model): """Return all metabolites in the external compartment.""" ex_comp = find_external_compartment(model) return [met for met in model.metabolites if met.compartment == ex_comp]
[ "def", "find_external_metabolites", "(", "model", ")", ":", "ex_comp", "=", "find_external_compartment", "(", "model", ")", "return", "[", "met", "for", "met", "in", "model", ".", "metabolites", "if", "met", ".", "compartment", "==", "ex_comp", "]" ]
Return all metabolites in the external compartment.
[ "Return", "all", "metabolites", "in", "the", "external", "compartment", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/basic.py#L541-L544
opencobra/memote
memote/suite/results/result_manager.py
ResultManager.store
def store(self, result, filename, pretty=True): """ Write a result to the given file. Parameters ---------- result : memote.MemoteResult The dictionary structure of results. filename : str or pathlib.Path Store results directly to the given filename. pretty : bool, optional Whether (default) or not to write JSON in a more legible format. """ LOGGER.info("Storing result in '%s'.", filename) if filename.endswith(".gz"): with gzip.open(filename, "wb") as file_handle: file_handle.write( jsonify(result, pretty=pretty).encode("utf-8") ) else: with open(filename, "w", encoding="utf-8") as file_handle: file_handle.write(jsonify(result, pretty=pretty))
python
def store(self, result, filename, pretty=True): """ Write a result to the given file. Parameters ---------- result : memote.MemoteResult The dictionary structure of results. filename : str or pathlib.Path Store results directly to the given filename. pretty : bool, optional Whether (default) or not to write JSON in a more legible format. """ LOGGER.info("Storing result in '%s'.", filename) if filename.endswith(".gz"): with gzip.open(filename, "wb") as file_handle: file_handle.write( jsonify(result, pretty=pretty).encode("utf-8") ) else: with open(filename, "w", encoding="utf-8") as file_handle: file_handle.write(jsonify(result, pretty=pretty))
[ "def", "store", "(", "self", ",", "result", ",", "filename", ",", "pretty", "=", "True", ")", ":", "LOGGER", ".", "info", "(", "\"Storing result in '%s'.\"", ",", "filename", ")", "if", "filename", ".", "endswith", "(", "\".gz\"", ")", ":", "with", "gzip", ".", "open", "(", "filename", ",", "\"wb\"", ")", "as", "file_handle", ":", "file_handle", ".", "write", "(", "jsonify", "(", "result", ",", "pretty", "=", "pretty", ")", ".", "encode", "(", "\"utf-8\"", ")", ")", "else", ":", "with", "open", "(", "filename", ",", "\"w\"", ",", "encoding", "=", "\"utf-8\"", ")", "as", "file_handle", ":", "file_handle", ".", "write", "(", "jsonify", "(", "result", ",", "pretty", "=", "pretty", ")", ")" ]
Write a result to the given file. Parameters ---------- result : memote.MemoteResult The dictionary structure of results. filename : str or pathlib.Path Store results directly to the given filename. pretty : bool, optional Whether (default) or not to write JSON in a more legible format.
[ "Write", "a", "result", "to", "the", "given", "file", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/results/result_manager.py#L42-L64
opencobra/memote
memote/suite/results/result_manager.py
ResultManager.load
def load(self, filename): """Load a result from the given JSON file.""" LOGGER.info("Loading result from '%s'.", filename) if filename.endswith(".gz"): with gzip.open(filename, "rb") as file_handle: result = MemoteResult( json.loads(file_handle.read().decode("utf-8")) ) else: with open(filename, "r", encoding="utf-8") as file_handle: result = MemoteResult(json.load(file_handle)) # TODO (Moritz Beber): Validate the read-in JSON maybe? Trade-off # between extra time taken and correctness. Maybe we re-visit this # issue when there was a new JSON format version needed. return result
python
def load(self, filename): """Load a result from the given JSON file.""" LOGGER.info("Loading result from '%s'.", filename) if filename.endswith(".gz"): with gzip.open(filename, "rb") as file_handle: result = MemoteResult( json.loads(file_handle.read().decode("utf-8")) ) else: with open(filename, "r", encoding="utf-8") as file_handle: result = MemoteResult(json.load(file_handle)) # TODO (Moritz Beber): Validate the read-in JSON maybe? Trade-off # between extra time taken and correctness. Maybe we re-visit this # issue when there was a new JSON format version needed. return result
[ "def", "load", "(", "self", ",", "filename", ")", ":", "LOGGER", ".", "info", "(", "\"Loading result from '%s'.\"", ",", "filename", ")", "if", "filename", ".", "endswith", "(", "\".gz\"", ")", ":", "with", "gzip", ".", "open", "(", "filename", ",", "\"rb\"", ")", "as", "file_handle", ":", "result", "=", "MemoteResult", "(", "json", ".", "loads", "(", "file_handle", ".", "read", "(", ")", ".", "decode", "(", "\"utf-8\"", ")", ")", ")", "else", ":", "with", "open", "(", "filename", ",", "\"r\"", ",", "encoding", "=", "\"utf-8\"", ")", "as", "file_handle", ":", "result", "=", "MemoteResult", "(", "json", ".", "load", "(", "file_handle", ")", ")", "# TODO (Moritz Beber): Validate the read-in JSON maybe? Trade-off", "# between extra time taken and correctness. Maybe we re-visit this", "# issue when there was a new JSON format version needed.", "return", "result" ]
Load a result from the given JSON file.
[ "Load", "a", "result", "from", "the", "given", "JSON", "file", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/results/result_manager.py#L66-L80
opencobra/memote
memote/support/validation.py
load_cobra_model
def load_cobra_model(path, notifications): """Load a COBRA model with meta information from an SBML document.""" doc = libsbml.readSBML(path) fbc = doc.getPlugin("fbc") sbml_ver = doc.getLevel(), doc.getVersion(), fbc if fbc is None else \ fbc.getVersion() with catch_warnings(record=True) as warnings: simplefilter("always") try: model = read_sbml_model(path) except Exception as err: notifications['errors'].append(str(err)) model = None validate = True else: validate = False notifications['warnings'].extend([str(w.message) for w in warnings]) if validate: run_sbml_validation(doc, notifications) return model, sbml_ver
python
def load_cobra_model(path, notifications): """Load a COBRA model with meta information from an SBML document.""" doc = libsbml.readSBML(path) fbc = doc.getPlugin("fbc") sbml_ver = doc.getLevel(), doc.getVersion(), fbc if fbc is None else \ fbc.getVersion() with catch_warnings(record=True) as warnings: simplefilter("always") try: model = read_sbml_model(path) except Exception as err: notifications['errors'].append(str(err)) model = None validate = True else: validate = False notifications['warnings'].extend([str(w.message) for w in warnings]) if validate: run_sbml_validation(doc, notifications) return model, sbml_ver
[ "def", "load_cobra_model", "(", "path", ",", "notifications", ")", ":", "doc", "=", "libsbml", ".", "readSBML", "(", "path", ")", "fbc", "=", "doc", ".", "getPlugin", "(", "\"fbc\"", ")", "sbml_ver", "=", "doc", ".", "getLevel", "(", ")", ",", "doc", ".", "getVersion", "(", ")", ",", "fbc", "if", "fbc", "is", "None", "else", "fbc", ".", "getVersion", "(", ")", "with", "catch_warnings", "(", "record", "=", "True", ")", "as", "warnings", ":", "simplefilter", "(", "\"always\"", ")", "try", ":", "model", "=", "read_sbml_model", "(", "path", ")", "except", "Exception", "as", "err", ":", "notifications", "[", "'errors'", "]", ".", "append", "(", "str", "(", "err", ")", ")", "model", "=", "None", "validate", "=", "True", "else", ":", "validate", "=", "False", "notifications", "[", "'warnings'", "]", ".", "extend", "(", "[", "str", "(", "w", ".", "message", ")", "for", "w", "in", "warnings", "]", ")", "if", "validate", ":", "run_sbml_validation", "(", "doc", ",", "notifications", ")", "return", "model", ",", "sbml_ver" ]
Load a COBRA model with meta information from an SBML document.
[ "Load", "a", "COBRA", "model", "with", "meta", "information", "from", "an", "SBML", "document", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/validation.py#L30-L49
opencobra/memote
memote/support/validation.py
format_failure
def format_failure(failure): """Format how an error or warning should be displayed.""" return "Line {}, Column {} - #{}: {} - Category: {}, Severity: {}".format( failure.getLine(), failure.getColumn(), failure.getErrorId(), failure.getMessage(), failure.getCategoryAsString(), failure.getSeverity() )
python
def format_failure(failure): """Format how an error or warning should be displayed.""" return "Line {}, Column {} - #{}: {} - Category: {}, Severity: {}".format( failure.getLine(), failure.getColumn(), failure.getErrorId(), failure.getMessage(), failure.getCategoryAsString(), failure.getSeverity() )
[ "def", "format_failure", "(", "failure", ")", ":", "return", "\"Line {}, Column {} - #{}: {} - Category: {}, Severity: {}\"", ".", "format", "(", "failure", ".", "getLine", "(", ")", ",", "failure", ".", "getColumn", "(", ")", ",", "failure", ".", "getErrorId", "(", ")", ",", "failure", ".", "getMessage", "(", ")", ",", "failure", ".", "getCategoryAsString", "(", ")", ",", "failure", ".", "getSeverity", "(", ")", ")" ]
Format how an error or warning should be displayed.
[ "Format", "how", "an", "error", "or", "warning", "should", "be", "displayed", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/validation.py#L52-L61
opencobra/memote
memote/support/validation.py
run_sbml_validation
def run_sbml_validation(document, notifications): """Report errors and warnings found in an SBML document.""" validator = libsbml.SBMLValidator() validator.validate(document) for i in range(document.getNumErrors()): notifications['errors'].append(format_failure(document.getError(i))) for i in range(validator.getNumFailures()): failure = validator.getFailure(i) if failure.isWarning(): notifications['warnings'].append(format_failure(failure)) else: notifications['errors'].append(format_failure(failure))
python
def run_sbml_validation(document, notifications): """Report errors and warnings found in an SBML document.""" validator = libsbml.SBMLValidator() validator.validate(document) for i in range(document.getNumErrors()): notifications['errors'].append(format_failure(document.getError(i))) for i in range(validator.getNumFailures()): failure = validator.getFailure(i) if failure.isWarning(): notifications['warnings'].append(format_failure(failure)) else: notifications['errors'].append(format_failure(failure))
[ "def", "run_sbml_validation", "(", "document", ",", "notifications", ")", ":", "validator", "=", "libsbml", ".", "SBMLValidator", "(", ")", "validator", ".", "validate", "(", "document", ")", "for", "i", "in", "range", "(", "document", ".", "getNumErrors", "(", ")", ")", ":", "notifications", "[", "'errors'", "]", ".", "append", "(", "format_failure", "(", "document", ".", "getError", "(", "i", ")", ")", ")", "for", "i", "in", "range", "(", "validator", ".", "getNumFailures", "(", ")", ")", ":", "failure", "=", "validator", ".", "getFailure", "(", "i", ")", "if", "failure", ".", "isWarning", "(", ")", ":", "notifications", "[", "'warnings'", "]", ".", "append", "(", "format_failure", "(", "failure", ")", ")", "else", ":", "notifications", "[", "'errors'", "]", ".", "append", "(", "format_failure", "(", "failure", ")", ")" ]
Report errors and warnings found in an SBML document.
[ "Report", "errors", "and", "warnings", "found", "in", "an", "SBML", "document", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/validation.py#L64-L75
opencobra/memote
memote/suite/results/sql_result_manager.py
SQLResultManager.store
def store(self, result, commit=None, **kwargs): """ Store a result in a JSON file attaching git meta information. Parameters ---------- result : memote.MemoteResult The dictionary structure of results. commit : str, optional Unique hexsha of the desired commit. kwargs : Passed to parent function. """ git_info = self.record_git_info(commit) try: row = self.session.query(Result). \ filter_by(hexsha=git_info.hexsha). \ one() LOGGER.info("Updating result '%s'.", git_info.hexsha) row.memote_result = result except NoResultFound: row = Result(memote_result=result) LOGGER.info("Storing result '%s'.", git_info.hexsha) row.hexsha = git_info.hexsha row.author = git_info.author row.email = git_info.email row.authored_on = git_info.authored_on self.session.add(row) self.session.commit()
python
def store(self, result, commit=None, **kwargs): """ Store a result in a JSON file attaching git meta information. Parameters ---------- result : memote.MemoteResult The dictionary structure of results. commit : str, optional Unique hexsha of the desired commit. kwargs : Passed to parent function. """ git_info = self.record_git_info(commit) try: row = self.session.query(Result). \ filter_by(hexsha=git_info.hexsha). \ one() LOGGER.info("Updating result '%s'.", git_info.hexsha) row.memote_result = result except NoResultFound: row = Result(memote_result=result) LOGGER.info("Storing result '%s'.", git_info.hexsha) row.hexsha = git_info.hexsha row.author = git_info.author row.email = git_info.email row.authored_on = git_info.authored_on self.session.add(row) self.session.commit()
[ "def", "store", "(", "self", ",", "result", ",", "commit", "=", "None", ",", "*", "*", "kwargs", ")", ":", "git_info", "=", "self", ".", "record_git_info", "(", "commit", ")", "try", ":", "row", "=", "self", ".", "session", ".", "query", "(", "Result", ")", ".", "filter_by", "(", "hexsha", "=", "git_info", ".", "hexsha", ")", ".", "one", "(", ")", "LOGGER", ".", "info", "(", "\"Updating result '%s'.\"", ",", "git_info", ".", "hexsha", ")", "row", ".", "memote_result", "=", "result", "except", "NoResultFound", ":", "row", "=", "Result", "(", "memote_result", "=", "result", ")", "LOGGER", ".", "info", "(", "\"Storing result '%s'.\"", ",", "git_info", ".", "hexsha", ")", "row", ".", "hexsha", "=", "git_info", ".", "hexsha", "row", ".", "author", "=", "git_info", ".", "author", "row", ".", "email", "=", "git_info", ".", "email", "row", ".", "authored_on", "=", "git_info", ".", "authored_on", "self", ".", "session", ".", "add", "(", "row", ")", "self", ".", "session", ".", "commit", "(", ")" ]
Store a result in a JSON file attaching git meta information. Parameters ---------- result : memote.MemoteResult The dictionary structure of results. commit : str, optional Unique hexsha of the desired commit. kwargs : Passed to parent function.
[ "Store", "a", "result", "in", "a", "JSON", "file", "attaching", "git", "meta", "information", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/results/sql_result_manager.py#L58-L87
opencobra/memote
memote/suite/results/sql_result_manager.py
SQLResultManager.load
def load(self, commit=None): """Load a result from the database.""" git_info = self.record_git_info(commit) LOGGER.info("Loading result from '%s'.", git_info.hexsha) result = MemoteResult( self.session.query(Result.memote_result). filter_by(hexsha=git_info.hexsha). one().memote_result) # Add git info so the object is equivalent to the one returned by the # RepoResultManager. self.add_git(result.meta, git_info) return result
python
def load(self, commit=None): """Load a result from the database.""" git_info = self.record_git_info(commit) LOGGER.info("Loading result from '%s'.", git_info.hexsha) result = MemoteResult( self.session.query(Result.memote_result). filter_by(hexsha=git_info.hexsha). one().memote_result) # Add git info so the object is equivalent to the one returned by the # RepoResultManager. self.add_git(result.meta, git_info) return result
[ "def", "load", "(", "self", ",", "commit", "=", "None", ")", ":", "git_info", "=", "self", ".", "record_git_info", "(", "commit", ")", "LOGGER", ".", "info", "(", "\"Loading result from '%s'.\"", ",", "git_info", ".", "hexsha", ")", "result", "=", "MemoteResult", "(", "self", ".", "session", ".", "query", "(", "Result", ".", "memote_result", ")", ".", "filter_by", "(", "hexsha", "=", "git_info", ".", "hexsha", ")", ".", "one", "(", ")", ".", "memote_result", ")", "# Add git info so the object is equivalent to the one returned by the", "# RepoResultManager.", "self", ".", "add_git", "(", "result", ".", "meta", ",", "git_info", ")", "return", "result" ]
Load a result from the database.
[ "Load", "a", "result", "from", "the", "database", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/results/sql_result_manager.py#L89-L100
opencobra/memote
memote/suite/reporting/history.py
HistoryReport.collect_history
def collect_history(self): """Build the structure of results in terms of a commit history.""" def format_data(data): """Format result data according to the user-defined type.""" # TODO Remove this failsafe once proper error handling is in place. if type == "percent" or data is None: # Return an empty list here to reduce the output file size. # The angular report will ignore the `data` and instead display # the `metric`. return [] if type == "count": return len(data) return data base = dict() tests = base.setdefault("tests", dict()) score = base.setdefault("score", dict()) score_collection = score.setdefault("total_score", dict()) for branch, commits in self._history.iter_branches(): for commit in reversed(commits): result = self.result = self._history.get_result(commit) # Calculate the score for each result and store all the total # scores for each commit in the base dictionary. self.compute_score() total_score = self.result["score"]["total_score"] score_collection.setdefault("history", list()) score_collection["format_type"] = "score" score_collection["history"].append({ "branch": branch, "commit": commit, "metric": total_score}) # Now arrange the results for each test into the appropriate # format. Specifically such that the Accordion and the Vega # Plot components can easily read them. for test in result.cases: tests.setdefault(test, dict()) if "title" not in tests[test]: tests[test]["title"] = result.cases[test]["title"] if "summary" not in tests[test]: tests[test]["summary"] = result.cases[test]["summary"] if "type" not in tests[test]: tests[test]["format_type"] = result.cases[test][ "format_type"] type = tests[test]["format_type"] metric = result.cases[test].get("metric") data = result.cases[test].get("data") res = result.cases[test].get("result") if isinstance(metric, dict): tests[test].setdefault("history", dict()) for param in metric: tests[test]["history"].setdefault(param, list()). 
\ append({ "branch": branch, "commit": commit, "metric": metric.get(param), "data": format_data(data.get(param)), "result": res.get(param)}) else: tests[test].setdefault("history", list()).append({ "branch": branch, "commit": commit, "metric": metric, "data": format_data(data), "result": res }) return base
python
def collect_history(self): """Build the structure of results in terms of a commit history.""" def format_data(data): """Format result data according to the user-defined type.""" # TODO Remove this failsafe once proper error handling is in place. if type == "percent" or data is None: # Return an empty list here to reduce the output file size. # The angular report will ignore the `data` and instead display # the `metric`. return [] if type == "count": return len(data) return data base = dict() tests = base.setdefault("tests", dict()) score = base.setdefault("score", dict()) score_collection = score.setdefault("total_score", dict()) for branch, commits in self._history.iter_branches(): for commit in reversed(commits): result = self.result = self._history.get_result(commit) # Calculate the score for each result and store all the total # scores for each commit in the base dictionary. self.compute_score() total_score = self.result["score"]["total_score"] score_collection.setdefault("history", list()) score_collection["format_type"] = "score" score_collection["history"].append({ "branch": branch, "commit": commit, "metric": total_score}) # Now arrange the results for each test into the appropriate # format. Specifically such that the Accordion and the Vega # Plot components can easily read them. for test in result.cases: tests.setdefault(test, dict()) if "title" not in tests[test]: tests[test]["title"] = result.cases[test]["title"] if "summary" not in tests[test]: tests[test]["summary"] = result.cases[test]["summary"] if "type" not in tests[test]: tests[test]["format_type"] = result.cases[test][ "format_type"] type = tests[test]["format_type"] metric = result.cases[test].get("metric") data = result.cases[test].get("data") res = result.cases[test].get("result") if isinstance(metric, dict): tests[test].setdefault("history", dict()) for param in metric: tests[test]["history"].setdefault(param, list()). 
\ append({ "branch": branch, "commit": commit, "metric": metric.get(param), "data": format_data(data.get(param)), "result": res.get(param)}) else: tests[test].setdefault("history", list()).append({ "branch": branch, "commit": commit, "metric": metric, "data": format_data(data), "result": res }) return base
[ "def", "collect_history", "(", "self", ")", ":", "def", "format_data", "(", "data", ")", ":", "\"\"\"Format result data according to the user-defined type.\"\"\"", "# TODO Remove this failsafe once proper error handling is in place.", "if", "type", "==", "\"percent\"", "or", "data", "is", "None", ":", "# Return an empty list here to reduce the output file size.", "# The angular report will ignore the `data` and instead display", "# the `metric`.", "return", "[", "]", "if", "type", "==", "\"count\"", ":", "return", "len", "(", "data", ")", "return", "data", "base", "=", "dict", "(", ")", "tests", "=", "base", ".", "setdefault", "(", "\"tests\"", ",", "dict", "(", ")", ")", "score", "=", "base", ".", "setdefault", "(", "\"score\"", ",", "dict", "(", ")", ")", "score_collection", "=", "score", ".", "setdefault", "(", "\"total_score\"", ",", "dict", "(", ")", ")", "for", "branch", ",", "commits", "in", "self", ".", "_history", ".", "iter_branches", "(", ")", ":", "for", "commit", "in", "reversed", "(", "commits", ")", ":", "result", "=", "self", ".", "result", "=", "self", ".", "_history", ".", "get_result", "(", "commit", ")", "# Calculate the score for each result and store all the total", "# scores for each commit in the base dictionary.", "self", ".", "compute_score", "(", ")", "total_score", "=", "self", ".", "result", "[", "\"score\"", "]", "[", "\"total_score\"", "]", "score_collection", ".", "setdefault", "(", "\"history\"", ",", "list", "(", ")", ")", "score_collection", "[", "\"format_type\"", "]", "=", "\"score\"", "score_collection", "[", "\"history\"", "]", ".", "append", "(", "{", "\"branch\"", ":", "branch", ",", "\"commit\"", ":", "commit", ",", "\"metric\"", ":", "total_score", "}", ")", "# Now arrange the results for each test into the appropriate", "# format. 
Specifically such that the Accordion and the Vega", "# Plot components can easily read them.", "for", "test", "in", "result", ".", "cases", ":", "tests", ".", "setdefault", "(", "test", ",", "dict", "(", ")", ")", "if", "\"title\"", "not", "in", "tests", "[", "test", "]", ":", "tests", "[", "test", "]", "[", "\"title\"", "]", "=", "result", ".", "cases", "[", "test", "]", "[", "\"title\"", "]", "if", "\"summary\"", "not", "in", "tests", "[", "test", "]", ":", "tests", "[", "test", "]", "[", "\"summary\"", "]", "=", "result", ".", "cases", "[", "test", "]", "[", "\"summary\"", "]", "if", "\"type\"", "not", "in", "tests", "[", "test", "]", ":", "tests", "[", "test", "]", "[", "\"format_type\"", "]", "=", "result", ".", "cases", "[", "test", "]", "[", "\"format_type\"", "]", "type", "=", "tests", "[", "test", "]", "[", "\"format_type\"", "]", "metric", "=", "result", ".", "cases", "[", "test", "]", ".", "get", "(", "\"metric\"", ")", "data", "=", "result", ".", "cases", "[", "test", "]", ".", "get", "(", "\"data\"", ")", "res", "=", "result", ".", "cases", "[", "test", "]", ".", "get", "(", "\"result\"", ")", "if", "isinstance", "(", "metric", ",", "dict", ")", ":", "tests", "[", "test", "]", ".", "setdefault", "(", "\"history\"", ",", "dict", "(", ")", ")", "for", "param", "in", "metric", ":", "tests", "[", "test", "]", "[", "\"history\"", "]", ".", "setdefault", "(", "param", ",", "list", "(", ")", ")", ".", "append", "(", "{", "\"branch\"", ":", "branch", ",", "\"commit\"", ":", "commit", ",", "\"metric\"", ":", "metric", ".", "get", "(", "param", ")", ",", "\"data\"", ":", "format_data", "(", "data", ".", "get", "(", "param", ")", ")", ",", "\"result\"", ":", "res", ".", "get", "(", "param", ")", "}", ")", "else", ":", "tests", "[", "test", "]", ".", "setdefault", "(", "\"history\"", ",", "list", "(", ")", ")", ".", "append", "(", "{", "\"branch\"", ":", "branch", ",", "\"commit\"", ":", "commit", ",", "\"metric\"", ":", "metric", ",", "\"data\"", ":", 
"format_data", "(", "data", ")", ",", "\"result\"", ":", "res", "}", ")", "return", "base" ]
Build the structure of results in terms of a commit history.
[ "Build", "the", "structure", "of", "results", "in", "terms", "of", "a", "commit", "history", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/reporting/history.py#L61-L126
opencobra/memote
memote/suite/reporting/diff.py
DiffReport.format_and_score_diff_data
def format_and_score_diff_data(self, diff_results): """Reformat the api results to work with the front-end.""" base = dict() meta = base.setdefault('meta', dict()) tests = base.setdefault('tests', dict()) score = base.setdefault('score', dict()) for model_filename, result in iteritems(diff_results): if meta == dict(): meta = result["meta"] for test_id, test_results in iteritems(result["tests"]): tests.setdefault(test_id, dict()) if tests[test_id] == dict(): tests[test_id]["summary"] = test_results["summary"] tests[test_id]["title"] = test_results["title"] tests[test_id]["format_type"] = test_results["format_type"] if isinstance(test_results["metric"], dict): tests[test_id].setdefault("diff", dict()) for param in test_results["metric"]: tests[test_id]["diff"].setdefault(param, list()). \ append({ "model": model_filename, "data": test_results["data"].setdefault(param), "duration": test_results["duration"].setdefault(param), "message": test_results["message"].setdefault(param), "metric": test_results["metric"].setdefault(param), "result": test_results["result"].setdefault(param)}) else: tests[test_id].setdefault("diff", list()) tests[test_id]["diff"].append({ "model": model_filename, "data": test_results.setdefault("data"), "duration": test_results.setdefault("duration"), "message": test_results.setdefault("message"), "metric": test_results.setdefault("metric"), "result": test_results.setdefault("result")}) self.result = result self.compute_score() score.setdefault('total_score', dict()).setdefault('diff', list()) score.setdefault('sections', dict()).setdefault('diff', list()) score['total_score']['diff'].append({ "model": model_filename, "total_score": self.result['score']['total_score']}) for section in self.result['score']['sections']: section.update({"model": model_filename}) score['sections']['diff'].append(section) return base
python
def format_and_score_diff_data(self, diff_results): """Reformat the api results to work with the front-end.""" base = dict() meta = base.setdefault('meta', dict()) tests = base.setdefault('tests', dict()) score = base.setdefault('score', dict()) for model_filename, result in iteritems(diff_results): if meta == dict(): meta = result["meta"] for test_id, test_results in iteritems(result["tests"]): tests.setdefault(test_id, dict()) if tests[test_id] == dict(): tests[test_id]["summary"] = test_results["summary"] tests[test_id]["title"] = test_results["title"] tests[test_id]["format_type"] = test_results["format_type"] if isinstance(test_results["metric"], dict): tests[test_id].setdefault("diff", dict()) for param in test_results["metric"]: tests[test_id]["diff"].setdefault(param, list()). \ append({ "model": model_filename, "data": test_results["data"].setdefault(param), "duration": test_results["duration"].setdefault(param), "message": test_results["message"].setdefault(param), "metric": test_results["metric"].setdefault(param), "result": test_results["result"].setdefault(param)}) else: tests[test_id].setdefault("diff", list()) tests[test_id]["diff"].append({ "model": model_filename, "data": test_results.setdefault("data"), "duration": test_results.setdefault("duration"), "message": test_results.setdefault("message"), "metric": test_results.setdefault("metric"), "result": test_results.setdefault("result")}) self.result = result self.compute_score() score.setdefault('total_score', dict()).setdefault('diff', list()) score.setdefault('sections', dict()).setdefault('diff', list()) score['total_score']['diff'].append({ "model": model_filename, "total_score": self.result['score']['total_score']}) for section in self.result['score']['sections']: section.update({"model": model_filename}) score['sections']['diff'].append(section) return base
[ "def", "format_and_score_diff_data", "(", "self", ",", "diff_results", ")", ":", "base", "=", "dict", "(", ")", "meta", "=", "base", ".", "setdefault", "(", "'meta'", ",", "dict", "(", ")", ")", "tests", "=", "base", ".", "setdefault", "(", "'tests'", ",", "dict", "(", ")", ")", "score", "=", "base", ".", "setdefault", "(", "'score'", ",", "dict", "(", ")", ")", "for", "model_filename", ",", "result", "in", "iteritems", "(", "diff_results", ")", ":", "if", "meta", "==", "dict", "(", ")", ":", "meta", "=", "result", "[", "\"meta\"", "]", "for", "test_id", ",", "test_results", "in", "iteritems", "(", "result", "[", "\"tests\"", "]", ")", ":", "tests", ".", "setdefault", "(", "test_id", ",", "dict", "(", ")", ")", "if", "tests", "[", "test_id", "]", "==", "dict", "(", ")", ":", "tests", "[", "test_id", "]", "[", "\"summary\"", "]", "=", "test_results", "[", "\"summary\"", "]", "tests", "[", "test_id", "]", "[", "\"title\"", "]", "=", "test_results", "[", "\"title\"", "]", "tests", "[", "test_id", "]", "[", "\"format_type\"", "]", "=", "test_results", "[", "\"format_type\"", "]", "if", "isinstance", "(", "test_results", "[", "\"metric\"", "]", ",", "dict", ")", ":", "tests", "[", "test_id", "]", ".", "setdefault", "(", "\"diff\"", ",", "dict", "(", ")", ")", "for", "param", "in", "test_results", "[", "\"metric\"", "]", ":", "tests", "[", "test_id", "]", "[", "\"diff\"", "]", ".", "setdefault", "(", "param", ",", "list", "(", ")", ")", ".", "append", "(", "{", "\"model\"", ":", "model_filename", ",", "\"data\"", ":", "test_results", "[", "\"data\"", "]", ".", "setdefault", "(", "param", ")", ",", "\"duration\"", ":", "test_results", "[", "\"duration\"", "]", ".", "setdefault", "(", "param", ")", ",", "\"message\"", ":", "test_results", "[", "\"message\"", "]", ".", "setdefault", "(", "param", ")", ",", "\"metric\"", ":", "test_results", "[", "\"metric\"", "]", ".", "setdefault", "(", "param", ")", ",", "\"result\"", ":", "test_results", "[", "\"result\"", "]", ".", 
"setdefault", "(", "param", ")", "}", ")", "else", ":", "tests", "[", "test_id", "]", ".", "setdefault", "(", "\"diff\"", ",", "list", "(", ")", ")", "tests", "[", "test_id", "]", "[", "\"diff\"", "]", ".", "append", "(", "{", "\"model\"", ":", "model_filename", ",", "\"data\"", ":", "test_results", ".", "setdefault", "(", "\"data\"", ")", ",", "\"duration\"", ":", "test_results", ".", "setdefault", "(", "\"duration\"", ")", ",", "\"message\"", ":", "test_results", ".", "setdefault", "(", "\"message\"", ")", ",", "\"metric\"", ":", "test_results", ".", "setdefault", "(", "\"metric\"", ")", ",", "\"result\"", ":", "test_results", ".", "setdefault", "(", "\"result\"", ")", "}", ")", "self", ".", "result", "=", "result", "self", ".", "compute_score", "(", ")", "score", ".", "setdefault", "(", "'total_score'", ",", "dict", "(", ")", ")", ".", "setdefault", "(", "'diff'", ",", "list", "(", ")", ")", "score", ".", "setdefault", "(", "'sections'", ",", "dict", "(", ")", ")", ".", "setdefault", "(", "'diff'", ",", "list", "(", ")", ")", "score", "[", "'total_score'", "]", "[", "'diff'", "]", ".", "append", "(", "{", "\"model\"", ":", "model_filename", ",", "\"total_score\"", ":", "self", ".", "result", "[", "'score'", "]", "[", "'total_score'", "]", "}", ")", "for", "section", "in", "self", ".", "result", "[", "'score'", "]", "[", "'sections'", "]", ":", "section", ".", "update", "(", "{", "\"model\"", ":", "model_filename", "}", ")", "score", "[", "'sections'", "]", "[", "'diff'", "]", ".", "append", "(", "section", ")", "return", "base" ]
Reformat the api results to work with the front-end.
[ "Reformat", "the", "api", "results", "to", "work", "with", "the", "front", "-", "end", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/reporting/diff.py#L48-L97
opencobra/memote
scripts/annotate_mnx_shortlists.py
generate_shortlist
def generate_shortlist(mnx_db, shortlist): """ Create a condensed cross-references format from data in long form. Both data frames must contain a column 'MNX_ID' and the dump is assumed to also have a column 'XREF'. Parameters ---------- mnx_db : pandas.DataFrame The entire MetaNetX dump as a data frame. shortlist : pandas.DataFrame The shortlist of targets as a data frame. Returns ------- pandas.DataFrame A condensed format with MetaNetX identifiers as the column index and database identifiers as the row index. Elements are lists and often have multiple entries. """ # Reduce the whole database to targets of interest. xref = mnx_db.loc[mnx_db["MNX_ID"].isin(shortlist["MNX_ID"]), :] # Drop deprecated MetaNetX identifiers. Disabled for now. # xref = xref.loc[~xref["XREF"].str.startswith("deprecated", na=False), :] # Drop self-references for now since they don't follow the format. xref = xref.loc[xref["XREF"] != xref["MNX_ID"], :] # Split namespaces from identifiers. xref[["XREF_ID", "XREF"]] = xref["XREF"].str.split(":", n=1, expand=True) # Group the data in the xref dataframe so that one MNX ID maps to all # corresponding cross-references from other databases. Then list all # identifiers that belong to these databases: # MNX_ID XREF_ID # MNXM0 chebi [23367, 59999] # metacyc [UNKNOWN] # Make a separate column for every XREF_ID: # MNX_ID chebi metacyc # MNXM0 [23367, 59999] [UNKNOWN] xref = xref.groupby(["MNX_ID", "XREF_ID"], as_index=False, sort=False)[ "XREF"].apply(list).unstack('XREF_ID') # Re-insert MetaNetX identifiers as lists. # FIXME: Shouldn't we use metanetx.chemical here instead of 'mnx'? xref["mnx"] = [[x] for x in xref.index] # Transpose the data frame such that the index are now xref databases and # the column names are MetaNetX identifiers. return xref.T
python
def generate_shortlist(mnx_db, shortlist): """ Create a condensed cross-references format from data in long form. Both data frames must contain a column 'MNX_ID' and the dump is assumed to also have a column 'XREF'. Parameters ---------- mnx_db : pandas.DataFrame The entire MetaNetX dump as a data frame. shortlist : pandas.DataFrame The shortlist of targets as a data frame. Returns ------- pandas.DataFrame A condensed format with MetaNetX identifiers as the column index and database identifiers as the row index. Elements are lists and often have multiple entries. """ # Reduce the whole database to targets of interest. xref = mnx_db.loc[mnx_db["MNX_ID"].isin(shortlist["MNX_ID"]), :] # Drop deprecated MetaNetX identifiers. Disabled for now. # xref = xref.loc[~xref["XREF"].str.startswith("deprecated", na=False), :] # Drop self-references for now since they don't follow the format. xref = xref.loc[xref["XREF"] != xref["MNX_ID"], :] # Split namespaces from identifiers. xref[["XREF_ID", "XREF"]] = xref["XREF"].str.split(":", n=1, expand=True) # Group the data in the xref dataframe so that one MNX ID maps to all # corresponding cross-references from other databases. Then list all # identifiers that belong to these databases: # MNX_ID XREF_ID # MNXM0 chebi [23367, 59999] # metacyc [UNKNOWN] # Make a separate column for every XREF_ID: # MNX_ID chebi metacyc # MNXM0 [23367, 59999] [UNKNOWN] xref = xref.groupby(["MNX_ID", "XREF_ID"], as_index=False, sort=False)[ "XREF"].apply(list).unstack('XREF_ID') # Re-insert MetaNetX identifiers as lists. # FIXME: Shouldn't we use metanetx.chemical here instead of 'mnx'? xref["mnx"] = [[x] for x in xref.index] # Transpose the data frame such that the index are now xref databases and # the column names are MetaNetX identifiers. return xref.T
[ "def", "generate_shortlist", "(", "mnx_db", ",", "shortlist", ")", ":", "# Reduce the whole database to targets of interest.", "xref", "=", "mnx_db", ".", "loc", "[", "mnx_db", "[", "\"MNX_ID\"", "]", ".", "isin", "(", "shortlist", "[", "\"MNX_ID\"", "]", ")", ",", ":", "]", "# Drop deprecated MetaNetX identifiers. Disabled for now.", "# xref = xref.loc[~xref[\"XREF\"].str.startswith(\"deprecated\", na=False), :]", "# Drop self-references for now since they don't follow the format.", "xref", "=", "xref", ".", "loc", "[", "xref", "[", "\"XREF\"", "]", "!=", "xref", "[", "\"MNX_ID\"", "]", ",", ":", "]", "# Split namespaces from identifiers.", "xref", "[", "[", "\"XREF_ID\"", ",", "\"XREF\"", "]", "]", "=", "xref", "[", "\"XREF\"", "]", ".", "str", ".", "split", "(", "\":\"", ",", "n", "=", "1", ",", "expand", "=", "True", ")", "# Group the data in the xref dataframe so that one MNX ID maps to all", "# corresponding cross-references from other databases. Then list all", "# identifiers that belong to these databases:", "# MNX_ID XREF_ID", "# MNXM0 chebi [23367, 59999]", "# metacyc [UNKNOWN]", "# Make a separate column for every XREF_ID:", "# MNX_ID chebi metacyc", "# MNXM0 [23367, 59999] [UNKNOWN]", "xref", "=", "xref", ".", "groupby", "(", "[", "\"MNX_ID\"", ",", "\"XREF_ID\"", "]", ",", "as_index", "=", "False", ",", "sort", "=", "False", ")", "[", "\"XREF\"", "]", ".", "apply", "(", "list", ")", ".", "unstack", "(", "'XREF_ID'", ")", "# Re-insert MetaNetX identifiers as lists.", "# FIXME: Shouldn't we use metanetx.chemical here instead of 'mnx'?", "xref", "[", "\"mnx\"", "]", "=", "[", "[", "x", "]", "for", "x", "in", "xref", ".", "index", "]", "# Transpose the data frame such that the index are now xref databases and", "# the column names are MetaNetX identifiers.", "return", "xref", ".", "T" ]
Create a condensed cross-references format from data in long form. Both data frames must contain a column 'MNX_ID' and the dump is assumed to also have a column 'XREF'. Parameters ---------- mnx_db : pandas.DataFrame The entire MetaNetX dump as a data frame. shortlist : pandas.DataFrame The shortlist of targets as a data frame. Returns ------- pandas.DataFrame A condensed format with MetaNetX identifiers as the column index and database identifiers as the row index. Elements are lists and often have multiple entries.
[ "Create", "a", "condensed", "cross", "-", "references", "format", "from", "data", "in", "long", "form", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/scripts/annotate_mnx_shortlists.py#L50-L96
opencobra/memote
scripts/annotate_mnx_shortlists.py
generate
def generate(mnx_dump): """ Annotate a shortlist of metabolites with cross-references using MetaNetX. MNX_DUMP : The chemicals dump from MetaNetX usually called 'chem_xref.tsv'. Will be downloaded if it doesn't exist. """ LOGGER.info("Read shortlist.") targets = pd.read_table(join(dirname(__file__), "shortlist.tsv")) if not exists(mnx_dump): # Download the MetaNetX chemicals dump if it doesn't exists. # Download done as per https://stackoverflow.com/a/16696317. LOGGER.info("MetaNetX dump '%s' does not exist. Downloading...", mnx_dump) with open(mnx_dump, "wb") as file_handle, \ get("https://www.metanetx.org/cgi-bin/mnxget/mnxref/chem_xref.tsv", stream=True) as stream: for chunk in stream.iter_content(chunk_size=1024): file_handle.write(chunk) LOGGER.info("Done.") LOGGER.info("Read the MetaNetX dump with cross-references.") db = pd.read_table(mnx_dump, comment='#', names=['XREF', 'MNX_ID', 'Evidence', 'Description']) LOGGER.info("Generate the shortlist cross-references.") res = generate_shortlist(db, targets) LOGGER.info("Save result.") res.to_json(join(dirname(__file__), pardir, "memote", "support", "data", "met_id_shortlist.json"), force_ascii=False)
python
def generate(mnx_dump): """ Annotate a shortlist of metabolites with cross-references using MetaNetX. MNX_DUMP : The chemicals dump from MetaNetX usually called 'chem_xref.tsv'. Will be downloaded if it doesn't exist. """ LOGGER.info("Read shortlist.") targets = pd.read_table(join(dirname(__file__), "shortlist.tsv")) if not exists(mnx_dump): # Download the MetaNetX chemicals dump if it doesn't exists. # Download done as per https://stackoverflow.com/a/16696317. LOGGER.info("MetaNetX dump '%s' does not exist. Downloading...", mnx_dump) with open(mnx_dump, "wb") as file_handle, \ get("https://www.metanetx.org/cgi-bin/mnxget/mnxref/chem_xref.tsv", stream=True) as stream: for chunk in stream.iter_content(chunk_size=1024): file_handle.write(chunk) LOGGER.info("Done.") LOGGER.info("Read the MetaNetX dump with cross-references.") db = pd.read_table(mnx_dump, comment='#', names=['XREF', 'MNX_ID', 'Evidence', 'Description']) LOGGER.info("Generate the shortlist cross-references.") res = generate_shortlist(db, targets) LOGGER.info("Save result.") res.to_json(join(dirname(__file__), pardir, "memote", "support", "data", "met_id_shortlist.json"), force_ascii=False)
[ "def", "generate", "(", "mnx_dump", ")", ":", "LOGGER", ".", "info", "(", "\"Read shortlist.\"", ")", "targets", "=", "pd", ".", "read_table", "(", "join", "(", "dirname", "(", "__file__", ")", ",", "\"shortlist.tsv\"", ")", ")", "if", "not", "exists", "(", "mnx_dump", ")", ":", "# Download the MetaNetX chemicals dump if it doesn't exists.", "# Download done as per https://stackoverflow.com/a/16696317.", "LOGGER", ".", "info", "(", "\"MetaNetX dump '%s' does not exist. Downloading...\"", ",", "mnx_dump", ")", "with", "open", "(", "mnx_dump", ",", "\"wb\"", ")", "as", "file_handle", ",", "get", "(", "\"https://www.metanetx.org/cgi-bin/mnxget/mnxref/chem_xref.tsv\"", ",", "stream", "=", "True", ")", "as", "stream", ":", "for", "chunk", "in", "stream", ".", "iter_content", "(", "chunk_size", "=", "1024", ")", ":", "file_handle", ".", "write", "(", "chunk", ")", "LOGGER", ".", "info", "(", "\"Done.\"", ")", "LOGGER", ".", "info", "(", "\"Read the MetaNetX dump with cross-references.\"", ")", "db", "=", "pd", ".", "read_table", "(", "mnx_dump", ",", "comment", "=", "'#'", ",", "names", "=", "[", "'XREF'", ",", "'MNX_ID'", ",", "'Evidence'", ",", "'Description'", "]", ")", "LOGGER", ".", "info", "(", "\"Generate the shortlist cross-references.\"", ")", "res", "=", "generate_shortlist", "(", "db", ",", "targets", ")", "LOGGER", ".", "info", "(", "\"Save result.\"", ")", "res", ".", "to_json", "(", "join", "(", "dirname", "(", "__file__", ")", ",", "pardir", ",", "\"memote\"", ",", "\"support\"", ",", "\"data\"", ",", "\"met_id_shortlist.json\"", ")", ",", "force_ascii", "=", "False", ")" ]
Annotate a shortlist of metabolites with cross-references using MetaNetX. MNX_DUMP : The chemicals dump from MetaNetX usually called 'chem_xref.tsv'. Will be downloaded if it doesn't exist.
[ "Annotate", "a", "shortlist", "of", "metabolites", "with", "cross", "-", "references", "using", "MetaNetX", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/scripts/annotate_mnx_shortlists.py#L105-L133
opencobra/memote
memote/experimental/essentiality.py
EssentialityExperiment.validate
def validate(self, model, checks=[]): """Use a defined schema to validate the medium table format.""" custom = [ check_partial(gene_id_check, frozenset(g.id for g in model.genes)) ] super(EssentialityExperiment, self).validate( model=model, checks=checks + custom)
python
def validate(self, model, checks=[]): """Use a defined schema to validate the medium table format.""" custom = [ check_partial(gene_id_check, frozenset(g.id for g in model.genes)) ] super(EssentialityExperiment, self).validate( model=model, checks=checks + custom)
[ "def", "validate", "(", "self", ",", "model", ",", "checks", "=", "[", "]", ")", ":", "custom", "=", "[", "check_partial", "(", "gene_id_check", ",", "frozenset", "(", "g", ".", "id", "for", "g", "in", "model", ".", "genes", ")", ")", "]", "super", "(", "EssentialityExperiment", ",", "self", ")", ".", "validate", "(", "model", "=", "model", ",", "checks", "=", "checks", "+", "custom", ")" ]
Use a defined schema to validate the medium table format.
[ "Use", "a", "defined", "schema", "to", "validate", "the", "medium", "table", "format", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/experimental/essentiality.py#L70-L76
opencobra/memote
memote/experimental/essentiality.py
EssentialityExperiment.evaluate
def evaluate(self, model): """Use the defined parameters to predict single gene essentiality.""" with model: if self.medium is not None: self.medium.apply(model) if self.objective is not None: model.objective = self.objective model.add_cons_vars(self.constraints) max_val = model.slim_optimize() essen = single_gene_deletion( model, gene_list=self.data["gene"], processes=1) essen["gene"] = [list(g)[0] for g in essen.index] essen.index = essen["gene"] essen["essential"] = (essen["growth"] < (max_val * 0.1)) \ | essen["growth"].isna() return essen
python
def evaluate(self, model): """Use the defined parameters to predict single gene essentiality.""" with model: if self.medium is not None: self.medium.apply(model) if self.objective is not None: model.objective = self.objective model.add_cons_vars(self.constraints) max_val = model.slim_optimize() essen = single_gene_deletion( model, gene_list=self.data["gene"], processes=1) essen["gene"] = [list(g)[0] for g in essen.index] essen.index = essen["gene"] essen["essential"] = (essen["growth"] < (max_val * 0.1)) \ | essen["growth"].isna() return essen
[ "def", "evaluate", "(", "self", ",", "model", ")", ":", "with", "model", ":", "if", "self", ".", "medium", "is", "not", "None", ":", "self", ".", "medium", ".", "apply", "(", "model", ")", "if", "self", ".", "objective", "is", "not", "None", ":", "model", ".", "objective", "=", "self", ".", "objective", "model", ".", "add_cons_vars", "(", "self", ".", "constraints", ")", "max_val", "=", "model", ".", "slim_optimize", "(", ")", "essen", "=", "single_gene_deletion", "(", "model", ",", "gene_list", "=", "self", ".", "data", "[", "\"gene\"", "]", ",", "processes", "=", "1", ")", "essen", "[", "\"gene\"", "]", "=", "[", "list", "(", "g", ")", "[", "0", "]", "for", "g", "in", "essen", ".", "index", "]", "essen", ".", "index", "=", "essen", "[", "\"gene\"", "]", "essen", "[", "\"essential\"", "]", "=", "(", "essen", "[", "\"growth\"", "]", "<", "(", "max_val", "*", "0.1", ")", ")", "|", "essen", "[", "\"growth\"", "]", ".", "isna", "(", ")", "return", "essen" ]
Use the defined parameters to predict single gene essentiality.
[ "Use", "the", "defined", "parameters", "to", "predict", "single", "gene", "essentiality", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/experimental/essentiality.py#L78-L93
opencobra/memote
memote/utils.py
register_with
def register_with(registry): """ Register a passed in object. Intended to be used as a decorator on model building functions with a ``dict`` as a registry. Examples -------- .. code-block:: python REGISTRY = dict() @register_with(REGISTRY) def build_empty(base): return base """ def decorator(func): registry[func.__name__] = func return func return decorator
python
def register_with(registry): """ Register a passed in object. Intended to be used as a decorator on model building functions with a ``dict`` as a registry. Examples -------- .. code-block:: python REGISTRY = dict() @register_with(REGISTRY) def build_empty(base): return base """ def decorator(func): registry[func.__name__] = func return func return decorator
[ "def", "register_with", "(", "registry", ")", ":", "def", "decorator", "(", "func", ")", ":", "registry", "[", "func", ".", "__name__", "]", "=", "func", "return", "func", "return", "decorator" ]
Register a passed in object. Intended to be used as a decorator on model building functions with a ``dict`` as a registry. Examples -------- .. code-block:: python REGISTRY = dict() @register_with(REGISTRY) def build_empty(base): return base
[ "Register", "a", "passed", "in", "object", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/utils.py#L48-L68
opencobra/memote
memote/utils.py
annotate
def annotate(title, format_type, message=None, data=None, metric=1.0): """ Annotate a test case with info that should be displayed in the reports. Parameters ---------- title : str A human-readable descriptive title of the test case. format_type : str A string that determines how the result data is formatted in the report. It is expected not to be None. * 'number' : 'data' is a single number which can be an integer or float and should be represented as such. * 'count' : 'data' is a list, set or tuple. Choosing 'count' will display the length of that list e.g. number of metabolites without formula. * 'percent' : Instead of 'data' the content of 'metric' ought to be displayed e.g. percentage of metabolites without charge. 'metric' is expected to be a floating point number. * 'raw' : 'data' is ought to be displayed "as is" without formatting. This option is appropriate for single strings or a boolean output. message : str A short written explanation that states and possibly explains the test result. data Raw data which the test case generates and assesses. Can be of the following types: list, set, tuple, string, float, integer, and boolean. metric: float A value x in the range of 0 <= x <= 1 which represents the fraction of 'data' to the total in the model. For example, if 'data' are all metabolites without formula, 'metric' should be the fraction of metabolites without formula from the total of metabolites in the model. Returns ------- function The decorated function, now extended by the attribute 'annotation'. Notes ----- Adds "annotation" attribute to the function object, which stores values for predefined keys as a dictionary. """ if format_type not in TYPES: raise ValueError( "Invalid type. Expected one of: {}.".format(", ".join(TYPES))) def decorator(func): func.annotation = dict( title=title, summary=extended_summary(func), message=message, data=data, format_type=format_type, metric=metric) return func return decorator
python
def annotate(title, format_type, message=None, data=None, metric=1.0): """ Annotate a test case with info that should be displayed in the reports. Parameters ---------- title : str A human-readable descriptive title of the test case. format_type : str A string that determines how the result data is formatted in the report. It is expected not to be None. * 'number' : 'data' is a single number which can be an integer or float and should be represented as such. * 'count' : 'data' is a list, set or tuple. Choosing 'count' will display the length of that list e.g. number of metabolites without formula. * 'percent' : Instead of 'data' the content of 'metric' ought to be displayed e.g. percentage of metabolites without charge. 'metric' is expected to be a floating point number. * 'raw' : 'data' is ought to be displayed "as is" without formatting. This option is appropriate for single strings or a boolean output. message : str A short written explanation that states and possibly explains the test result. data Raw data which the test case generates and assesses. Can be of the following types: list, set, tuple, string, float, integer, and boolean. metric: float A value x in the range of 0 <= x <= 1 which represents the fraction of 'data' to the total in the model. For example, if 'data' are all metabolites without formula, 'metric' should be the fraction of metabolites without formula from the total of metabolites in the model. Returns ------- function The decorated function, now extended by the attribute 'annotation'. Notes ----- Adds "annotation" attribute to the function object, which stores values for predefined keys as a dictionary. """ if format_type not in TYPES: raise ValueError( "Invalid type. Expected one of: {}.".format(", ".join(TYPES))) def decorator(func): func.annotation = dict( title=title, summary=extended_summary(func), message=message, data=data, format_type=format_type, metric=metric) return func return decorator
[ "def", "annotate", "(", "title", ",", "format_type", ",", "message", "=", "None", ",", "data", "=", "None", ",", "metric", "=", "1.0", ")", ":", "if", "format_type", "not", "in", "TYPES", ":", "raise", "ValueError", "(", "\"Invalid type. Expected one of: {}.\"", ".", "format", "(", "\", \"", ".", "join", "(", "TYPES", ")", ")", ")", "def", "decorator", "(", "func", ")", ":", "func", ".", "annotation", "=", "dict", "(", "title", "=", "title", ",", "summary", "=", "extended_summary", "(", "func", ")", ",", "message", "=", "message", ",", "data", "=", "data", ",", "format_type", "=", "format_type", ",", "metric", "=", "metric", ")", "return", "func", "return", "decorator" ]
Annotate a test case with info that should be displayed in the reports. Parameters ---------- title : str A human-readable descriptive title of the test case. format_type : str A string that determines how the result data is formatted in the report. It is expected not to be None. * 'number' : 'data' is a single number which can be an integer or float and should be represented as such. * 'count' : 'data' is a list, set or tuple. Choosing 'count' will display the length of that list e.g. number of metabolites without formula. * 'percent' : Instead of 'data' the content of 'metric' ought to be displayed e.g. percentage of metabolites without charge. 'metric' is expected to be a floating point number. * 'raw' : 'data' is ought to be displayed "as is" without formatting. This option is appropriate for single strings or a boolean output. message : str A short written explanation that states and possibly explains the test result. data Raw data which the test case generates and assesses. Can be of the following types: list, set, tuple, string, float, integer, and boolean. metric: float A value x in the range of 0 <= x <= 1 which represents the fraction of 'data' to the total in the model. For example, if 'data' are all metabolites without formula, 'metric' should be the fraction of metabolites without formula from the total of metabolites in the model. Returns ------- function The decorated function, now extended by the attribute 'annotation'. Notes ----- Adds "annotation" attribute to the function object, which stores values for predefined keys as a dictionary.
[ "Annotate", "a", "test", "case", "with", "info", "that", "should", "be", "displayed", "in", "the", "reports", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/utils.py#L71-L130
opencobra/memote
memote/utils.py
truncate
def truncate(sequence): """ Create a potentially shortened text display of a list. Parameters ---------- sequence : list An indexable sequence of elements. Returns ------- str The list as a formatted string. """ if len(sequence) > LIST_SLICE: return ", ".join(sequence[:LIST_SLICE] + ["..."]) else: return ", ".join(sequence)
python
def truncate(sequence): """ Create a potentially shortened text display of a list. Parameters ---------- sequence : list An indexable sequence of elements. Returns ------- str The list as a formatted string. """ if len(sequence) > LIST_SLICE: return ", ".join(sequence[:LIST_SLICE] + ["..."]) else: return ", ".join(sequence)
[ "def", "truncate", "(", "sequence", ")", ":", "if", "len", "(", "sequence", ")", ">", "LIST_SLICE", ":", "return", "\", \"", ".", "join", "(", "sequence", "[", ":", "LIST_SLICE", "]", "+", "[", "\"...\"", "]", ")", "else", ":", "return", "\", \"", ".", "join", "(", "sequence", ")" ]
Create a potentially shortened text display of a list. Parameters ---------- sequence : list An indexable sequence of elements. Returns ------- str The list as a formatted string.
[ "Create", "a", "potentially", "shortened", "text", "display", "of", "a", "list", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/utils.py#L152-L170
opencobra/memote
memote/utils.py
log_json_incompatible_types
def log_json_incompatible_types(obj): """ Log types that are not JSON compatible. Explore a nested dictionary structure and log types that are not JSON compatible. Parameters ---------- obj : dict A potentially nested dictionary. """ keys_to_explore = list(obj) while len(keys_to_explore) > 0: key = keys_to_explore.pop() if not isinstance(key, str): LOGGER.info(type(key)) value = obj[key] if isinstance(value, dict): LOGGER.info("%s:", key) log_json_incompatible_types(value) elif not isinstance(value, JSON_TYPES): LOGGER.info("%s: %s", key, type(value)) elif isinstance(value, (int, float)) and not isfinite(value): LOGGER.info("%s: %f", key, value)
python
def log_json_incompatible_types(obj): """ Log types that are not JSON compatible. Explore a nested dictionary structure and log types that are not JSON compatible. Parameters ---------- obj : dict A potentially nested dictionary. """ keys_to_explore = list(obj) while len(keys_to_explore) > 0: key = keys_to_explore.pop() if not isinstance(key, str): LOGGER.info(type(key)) value = obj[key] if isinstance(value, dict): LOGGER.info("%s:", key) log_json_incompatible_types(value) elif not isinstance(value, JSON_TYPES): LOGGER.info("%s: %s", key, type(value)) elif isinstance(value, (int, float)) and not isfinite(value): LOGGER.info("%s: %f", key, value)
[ "def", "log_json_incompatible_types", "(", "obj", ")", ":", "keys_to_explore", "=", "list", "(", "obj", ")", "while", "len", "(", "keys_to_explore", ")", ">", "0", ":", "key", "=", "keys_to_explore", ".", "pop", "(", ")", "if", "not", "isinstance", "(", "key", ",", "str", ")", ":", "LOGGER", ".", "info", "(", "type", "(", "key", ")", ")", "value", "=", "obj", "[", "key", "]", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "LOGGER", ".", "info", "(", "\"%s:\"", ",", "key", ")", "log_json_incompatible_types", "(", "value", ")", "elif", "not", "isinstance", "(", "value", ",", "JSON_TYPES", ")", ":", "LOGGER", ".", "info", "(", "\"%s: %s\"", ",", "key", ",", "type", "(", "value", ")", ")", "elif", "isinstance", "(", "value", ",", "(", "int", ",", "float", ")", ")", "and", "not", "isfinite", "(", "value", ")", ":", "LOGGER", ".", "info", "(", "\"%s: %f\"", ",", "key", ",", "value", ")" ]
Log types that are not JSON compatible. Explore a nested dictionary structure and log types that are not JSON compatible. Parameters ---------- obj : dict A potentially nested dictionary.
[ "Log", "types", "that", "are", "not", "JSON", "compatible", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/utils.py#L173-L198
opencobra/memote
memote/utils.py
jsonify
def jsonify(obj, pretty=False): """ Turn a nested object into a (compressed) JSON string. Parameters ---------- obj : dict Any kind of dictionary structure. pretty : bool, optional Whether to format the resulting JSON in a more legible way ( default False). """ if pretty: params = dict(sort_keys=True, indent=2, allow_nan=False, separators=(",", ": "), ensure_ascii=False) else: params = dict(sort_keys=False, indent=None, allow_nan=False, separators=(",", ":"), ensure_ascii=False) try: return json.dumps(obj, **params) except (TypeError, ValueError) as error: LOGGER.critical( "The memote result structure is incompatible with the JSON " "standard.") log_json_incompatible_types(obj) raise_with_traceback(error)
python
def jsonify(obj, pretty=False): """ Turn a nested object into a (compressed) JSON string. Parameters ---------- obj : dict Any kind of dictionary structure. pretty : bool, optional Whether to format the resulting JSON in a more legible way ( default False). """ if pretty: params = dict(sort_keys=True, indent=2, allow_nan=False, separators=(",", ": "), ensure_ascii=False) else: params = dict(sort_keys=False, indent=None, allow_nan=False, separators=(",", ":"), ensure_ascii=False) try: return json.dumps(obj, **params) except (TypeError, ValueError) as error: LOGGER.critical( "The memote result structure is incompatible with the JSON " "standard.") log_json_incompatible_types(obj) raise_with_traceback(error)
[ "def", "jsonify", "(", "obj", ",", "pretty", "=", "False", ")", ":", "if", "pretty", ":", "params", "=", "dict", "(", "sort_keys", "=", "True", ",", "indent", "=", "2", ",", "allow_nan", "=", "False", ",", "separators", "=", "(", "\",\"", ",", "\": \"", ")", ",", "ensure_ascii", "=", "False", ")", "else", ":", "params", "=", "dict", "(", "sort_keys", "=", "False", ",", "indent", "=", "None", ",", "allow_nan", "=", "False", ",", "separators", "=", "(", "\",\"", ",", "\":\"", ")", ",", "ensure_ascii", "=", "False", ")", "try", ":", "return", "json", ".", "dumps", "(", "obj", ",", "*", "*", "params", ")", "except", "(", "TypeError", ",", "ValueError", ")", "as", "error", ":", "LOGGER", ".", "critical", "(", "\"The memote result structure is incompatible with the JSON \"", "\"standard.\"", ")", "log_json_incompatible_types", "(", "obj", ")", "raise_with_traceback", "(", "error", ")" ]
Turn a nested object into a (compressed) JSON string. Parameters ---------- obj : dict Any kind of dictionary structure. pretty : bool, optional Whether to format the resulting JSON in a more legible way ( default False).
[ "Turn", "a", "nested", "object", "into", "a", "(", "compressed", ")", "JSON", "string", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/utils.py#L225-L251
opencobra/memote
memote/utils.py
flatten
def flatten(list_of_lists): """Flatten a list of lists but maintain strings and ints as entries.""" flat_list = [] for sublist in list_of_lists: if isinstance(sublist, string_types) or isinstance(sublist, int): flat_list.append(sublist) elif sublist is None: continue elif not isinstance(sublist, string_types) and len(sublist) == 1: flat_list.append(sublist[0]) else: flat_list.append(tuple(sublist)) return flat_list
python
def flatten(list_of_lists): """Flatten a list of lists but maintain strings and ints as entries.""" flat_list = [] for sublist in list_of_lists: if isinstance(sublist, string_types) or isinstance(sublist, int): flat_list.append(sublist) elif sublist is None: continue elif not isinstance(sublist, string_types) and len(sublist) == 1: flat_list.append(sublist[0]) else: flat_list.append(tuple(sublist)) return flat_list
[ "def", "flatten", "(", "list_of_lists", ")", ":", "flat_list", "=", "[", "]", "for", "sublist", "in", "list_of_lists", ":", "if", "isinstance", "(", "sublist", ",", "string_types", ")", "or", "isinstance", "(", "sublist", ",", "int", ")", ":", "flat_list", ".", "append", "(", "sublist", ")", "elif", "sublist", "is", "None", ":", "continue", "elif", "not", "isinstance", "(", "sublist", ",", "string_types", ")", "and", "len", "(", "sublist", ")", "==", "1", ":", "flat_list", ".", "append", "(", "sublist", "[", "0", "]", ")", "else", ":", "flat_list", ".", "append", "(", "tuple", "(", "sublist", ")", ")", "return", "flat_list" ]
Flatten a list of lists but maintain strings and ints as entries.
[ "Flatten", "a", "list", "of", "lists", "but", "maintain", "strings", "and", "ints", "as", "entries", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/utils.py#L254-L266
opencobra/memote
memote/utils.py
stdout_notifications
def stdout_notifications(notifications): """ Print each entry of errors and warnings to stdout. Parameters ---------- notifications: dict A simple dictionary structure containing a list of errors and warnings. """ for error in notifications["errors"]: LOGGER.error(error) for warn in notifications["warnings"]: LOGGER.warning(warn)
python
def stdout_notifications(notifications): """ Print each entry of errors and warnings to stdout. Parameters ---------- notifications: dict A simple dictionary structure containing a list of errors and warnings. """ for error in notifications["errors"]: LOGGER.error(error) for warn in notifications["warnings"]: LOGGER.warning(warn)
[ "def", "stdout_notifications", "(", "notifications", ")", ":", "for", "error", "in", "notifications", "[", "\"errors\"", "]", ":", "LOGGER", ".", "error", "(", "error", ")", "for", "warn", "in", "notifications", "[", "\"warnings\"", "]", ":", "LOGGER", ".", "warning", "(", "warn", ")" ]
Print each entry of errors and warnings to stdout. Parameters ---------- notifications: dict A simple dictionary structure containing a list of errors and warnings.
[ "Print", "each", "entry", "of", "errors", "and", "warnings", "to", "stdout", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/utils.py#L289-L302
opencobra/memote
memote/experimental/experimental_base.py
ExperimentalBase.load
def load(self, dtype_conversion=None): """ Load the data table and corresponding validation schema. Parameters ---------- dtype_conversion : dict Column names as keys and corresponding type for loading the data. Please take a look at the `pandas documentation <https://pandas.pydata.org/pandas-docs/stable/io.html#specifying-column-data-types>`__ for detailed explanations. """ self.data = read_tabular(self.filename, dtype_conversion) with open_text(memote.experimental.schemata, self.SCHEMA, encoding="utf-8") as file_handle: self.schema = json.load(file_handle)
python
def load(self, dtype_conversion=None): """ Load the data table and corresponding validation schema. Parameters ---------- dtype_conversion : dict Column names as keys and corresponding type for loading the data. Please take a look at the `pandas documentation <https://pandas.pydata.org/pandas-docs/stable/io.html#specifying-column-data-types>`__ for detailed explanations. """ self.data = read_tabular(self.filename, dtype_conversion) with open_text(memote.experimental.schemata, self.SCHEMA, encoding="utf-8") as file_handle: self.schema = json.load(file_handle)
[ "def", "load", "(", "self", ",", "dtype_conversion", "=", "None", ")", ":", "self", ".", "data", "=", "read_tabular", "(", "self", ".", "filename", ",", "dtype_conversion", ")", "with", "open_text", "(", "memote", ".", "experimental", ".", "schemata", ",", "self", ".", "SCHEMA", ",", "encoding", "=", "\"utf-8\"", ")", "as", "file_handle", ":", "self", ".", "schema", "=", "json", ".", "load", "(", "file_handle", ")" ]
Load the data table and corresponding validation schema. Parameters ---------- dtype_conversion : dict Column names as keys and corresponding type for loading the data. Please take a look at the `pandas documentation <https://pandas.pydata.org/pandas-docs/stable/io.html#specifying-column-data-types>`__ for detailed explanations.
[ "Load", "the", "data", "table", "and", "corresponding", "validation", "schema", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/experimental/experimental_base.py#L72-L88
opencobra/memote
memote/experimental/experimental_base.py
ExperimentalBase.validate
def validate(self, model, checks=[]): """Use a defined schema to validate the given table.""" records = self.data.to_dict("records") self.evaluate_report( validate(records, headers=list(records[0]), preset='table', schema=self.schema, order_fields=True, custom_checks=checks))
python
def validate(self, model, checks=[]): """Use a defined schema to validate the given table.""" records = self.data.to_dict("records") self.evaluate_report( validate(records, headers=list(records[0]), preset='table', schema=self.schema, order_fields=True, custom_checks=checks))
[ "def", "validate", "(", "self", ",", "model", ",", "checks", "=", "[", "]", ")", ":", "records", "=", "self", ".", "data", ".", "to_dict", "(", "\"records\"", ")", "self", ".", "evaluate_report", "(", "validate", "(", "records", ",", "headers", "=", "list", "(", "records", "[", "0", "]", ")", ",", "preset", "=", "'table'", ",", "schema", "=", "self", ".", "schema", ",", "order_fields", "=", "True", ",", "custom_checks", "=", "checks", ")", ")" ]
Use a defined schema to validate the given table.
[ "Use", "a", "defined", "schema", "to", "validate", "the", "given", "table", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/experimental/experimental_base.py#L90-L96
opencobra/memote
memote/experimental/experimental_base.py
ExperimentalBase.evaluate_report
def evaluate_report(report): """Iterate over validation errors.""" if report["valid"]: return for warn in report["warnings"]: LOGGER.warning(warn) # We only ever test one table at a time. for err in report["tables"][0]["errors"]: LOGGER.error(err["message"]) raise ValueError("Invalid data file. Please see errors above.")
python
def evaluate_report(report): """Iterate over validation errors.""" if report["valid"]: return for warn in report["warnings"]: LOGGER.warning(warn) # We only ever test one table at a time. for err in report["tables"][0]["errors"]: LOGGER.error(err["message"]) raise ValueError("Invalid data file. Please see errors above.")
[ "def", "evaluate_report", "(", "report", ")", ":", "if", "report", "[", "\"valid\"", "]", ":", "return", "for", "warn", "in", "report", "[", "\"warnings\"", "]", ":", "LOGGER", ".", "warning", "(", "warn", ")", "# We only ever test one table at a time.", "for", "err", "in", "report", "[", "\"tables\"", "]", "[", "0", "]", "[", "\"errors\"", "]", ":", "LOGGER", ".", "error", "(", "err", "[", "\"message\"", "]", ")", "raise", "ValueError", "(", "\"Invalid data file. Please see errors above.\"", ")" ]
Iterate over validation errors.
[ "Iterate", "over", "validation", "errors", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/experimental/experimental_base.py#L99-L108
opencobra/memote
memote/support/consistency_helpers.py
add_reaction_constraints
def add_reaction_constraints(model, reactions, Constraint): """ Add the stoichiometric coefficients as constraints. Parameters ---------- model : optlang.Model The transposed stoichiometric matrix representation. reactions : iterable Container of `cobra.Reaction` instances. Constraint : optlang.Constraint The constraint class for the specific interface. """ constraints = [] for rxn in reactions: expression = add( [c * model.variables[m.id] for m, c in rxn.metabolites.items()]) constraints.append(Constraint(expression, lb=0, ub=0, name=rxn.id)) model.add(constraints)
python
def add_reaction_constraints(model, reactions, Constraint): """ Add the stoichiometric coefficients as constraints. Parameters ---------- model : optlang.Model The transposed stoichiometric matrix representation. reactions : iterable Container of `cobra.Reaction` instances. Constraint : optlang.Constraint The constraint class for the specific interface. """ constraints = [] for rxn in reactions: expression = add( [c * model.variables[m.id] for m, c in rxn.metabolites.items()]) constraints.append(Constraint(expression, lb=0, ub=0, name=rxn.id)) model.add(constraints)
[ "def", "add_reaction_constraints", "(", "model", ",", "reactions", ",", "Constraint", ")", ":", "constraints", "=", "[", "]", "for", "rxn", "in", "reactions", ":", "expression", "=", "add", "(", "[", "c", "*", "model", ".", "variables", "[", "m", ".", "id", "]", "for", "m", ",", "c", "in", "rxn", ".", "metabolites", ".", "items", "(", ")", "]", ")", "constraints", ".", "append", "(", "Constraint", "(", "expression", ",", "lb", "=", "0", ",", "ub", "=", "0", ",", "name", "=", "rxn", ".", "id", ")", ")", "model", ".", "add", "(", "constraints", ")" ]
Add the stoichiometric coefficients as constraints. Parameters ---------- model : optlang.Model The transposed stoichiometric matrix representation. reactions : iterable Container of `cobra.Reaction` instances. Constraint : optlang.Constraint The constraint class for the specific interface.
[ "Add", "the", "stoichiometric", "coefficients", "as", "constraints", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/consistency_helpers.py#L43-L62
opencobra/memote
memote/support/consistency_helpers.py
stoichiometry_matrix
def stoichiometry_matrix(metabolites, reactions): """ Return the stoichiometry matrix representation of a set of reactions. The reactions and metabolites order is respected. All metabolites are expected to be contained and complete in terms of the reactions. Parameters ---------- reactions : iterable A somehow ordered list of unique reactions. metabolites : iterable A somehow ordered list of unique metabolites. Returns ------- numpy.array The 2D array that represents the stoichiometry matrix. dict A dictionary mapping metabolites to row indexes. dict A dictionary mapping reactions to column indexes. """ matrix = np.zeros((len(metabolites), len(reactions))) met_index = dict((met, i) for i, met in enumerate(metabolites)) rxn_index = dict() for i, rxn in enumerate(reactions): rxn_index[rxn] = i for met, coef in iteritems(rxn.metabolites): j = met_index[met] matrix[j, i] = coef return matrix, met_index, rxn_index
python
def stoichiometry_matrix(metabolites, reactions): """ Return the stoichiometry matrix representation of a set of reactions. The reactions and metabolites order is respected. All metabolites are expected to be contained and complete in terms of the reactions. Parameters ---------- reactions : iterable A somehow ordered list of unique reactions. metabolites : iterable A somehow ordered list of unique metabolites. Returns ------- numpy.array The 2D array that represents the stoichiometry matrix. dict A dictionary mapping metabolites to row indexes. dict A dictionary mapping reactions to column indexes. """ matrix = np.zeros((len(metabolites), len(reactions))) met_index = dict((met, i) for i, met in enumerate(metabolites)) rxn_index = dict() for i, rxn in enumerate(reactions): rxn_index[rxn] = i for met, coef in iteritems(rxn.metabolites): j = met_index[met] matrix[j, i] = coef return matrix, met_index, rxn_index
[ "def", "stoichiometry_matrix", "(", "metabolites", ",", "reactions", ")", ":", "matrix", "=", "np", ".", "zeros", "(", "(", "len", "(", "metabolites", ")", ",", "len", "(", "reactions", ")", ")", ")", "met_index", "=", "dict", "(", "(", "met", ",", "i", ")", "for", "i", ",", "met", "in", "enumerate", "(", "metabolites", ")", ")", "rxn_index", "=", "dict", "(", ")", "for", "i", ",", "rxn", "in", "enumerate", "(", "reactions", ")", ":", "rxn_index", "[", "rxn", "]", "=", "i", "for", "met", ",", "coef", "in", "iteritems", "(", "rxn", ".", "metabolites", ")", ":", "j", "=", "met_index", "[", "met", "]", "matrix", "[", "j", ",", "i", "]", "=", "coef", "return", "matrix", ",", "met_index", ",", "rxn_index" ]
Return the stoichiometry matrix representation of a set of reactions. The reactions and metabolites order is respected. All metabolites are expected to be contained and complete in terms of the reactions. Parameters ---------- reactions : iterable A somehow ordered list of unique reactions. metabolites : iterable A somehow ordered list of unique metabolites. Returns ------- numpy.array The 2D array that represents the stoichiometry matrix. dict A dictionary mapping metabolites to row indexes. dict A dictionary mapping reactions to column indexes.
[ "Return", "the", "stoichiometry", "matrix", "representation", "of", "a", "set", "of", "reactions", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/consistency_helpers.py#L65-L97
opencobra/memote
memote/support/consistency_helpers.py
rank
def rank(matrix, atol=1e-13, rtol=0): """ Estimate the rank, i.e., the dimension of the column space, of a matrix. The algorithm used by this function is based on the singular value decomposition of `stoichiometry_matrix`. Parameters ---------- matrix : ndarray The matrix should be at most 2-D. A 1-D array with length k will be treated as a 2-D with shape (1, k) atol : float The absolute tolerance for a zero singular value. Singular values smaller than ``atol`` are considered to be zero. rtol : float The relative tolerance for a zero singular value. Singular values less than the relative tolerance times the largest singular value are considered to be zero. Notes ----- If both `atol` and `rtol` are positive, the combined tolerance is the maximum of the two; that is:: tol = max(atol, rtol * smax) Singular values smaller than ``tol`` are considered to be zero. Returns ------- int The estimated rank of the matrix. See Also -------- numpy.linalg.matrix_rank matrix_rank is basically the same as this function, but it does not provide the option of the absolute tolerance. """ matrix = np.atleast_2d(matrix) sigma = svd(matrix, compute_uv=False) tol = max(atol, rtol * sigma[0]) return int((sigma >= tol).sum())
python
def rank(matrix, atol=1e-13, rtol=0): """ Estimate the rank, i.e., the dimension of the column space, of a matrix. The algorithm used by this function is based on the singular value decomposition of `stoichiometry_matrix`. Parameters ---------- matrix : ndarray The matrix should be at most 2-D. A 1-D array with length k will be treated as a 2-D with shape (1, k) atol : float The absolute tolerance for a zero singular value. Singular values smaller than ``atol`` are considered to be zero. rtol : float The relative tolerance for a zero singular value. Singular values less than the relative tolerance times the largest singular value are considered to be zero. Notes ----- If both `atol` and `rtol` are positive, the combined tolerance is the maximum of the two; that is:: tol = max(atol, rtol * smax) Singular values smaller than ``tol`` are considered to be zero. Returns ------- int The estimated rank of the matrix. See Also -------- numpy.linalg.matrix_rank matrix_rank is basically the same as this function, but it does not provide the option of the absolute tolerance. """ matrix = np.atleast_2d(matrix) sigma = svd(matrix, compute_uv=False) tol = max(atol, rtol * sigma[0]) return int((sigma >= tol).sum())
[ "def", "rank", "(", "matrix", ",", "atol", "=", "1e-13", ",", "rtol", "=", "0", ")", ":", "matrix", "=", "np", ".", "atleast_2d", "(", "matrix", ")", "sigma", "=", "svd", "(", "matrix", ",", "compute_uv", "=", "False", ")", "tol", "=", "max", "(", "atol", ",", "rtol", "*", "sigma", "[", "0", "]", ")", "return", "int", "(", "(", "sigma", ">=", "tol", ")", ".", "sum", "(", ")", ")" ]
Estimate the rank, i.e., the dimension of the column space, of a matrix. The algorithm used by this function is based on the singular value decomposition of `stoichiometry_matrix`. Parameters ---------- matrix : ndarray The matrix should be at most 2-D. A 1-D array with length k will be treated as a 2-D with shape (1, k) atol : float The absolute tolerance for a zero singular value. Singular values smaller than ``atol`` are considered to be zero. rtol : float The relative tolerance for a zero singular value. Singular values less than the relative tolerance times the largest singular value are considered to be zero. Notes ----- If both `atol` and `rtol` are positive, the combined tolerance is the maximum of the two; that is:: tol = max(atol, rtol * smax) Singular values smaller than ``tol`` are considered to be zero. Returns ------- int The estimated rank of the matrix. See Also -------- numpy.linalg.matrix_rank matrix_rank is basically the same as this function, but it does not provide the option of the absolute tolerance.
[ "Estimate", "the", "rank", "i", ".", "e", ".", "the", "dimension", "of", "the", "column", "space", "of", "a", "matrix", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/consistency_helpers.py#L100-L144
opencobra/memote
memote/support/consistency_helpers.py
nullspace
def nullspace(matrix, atol=1e-13, rtol=0.0): # noqa: D402 """ Compute an approximate basis for the null space (kernel) of a matrix. The algorithm used by this function is based on the singular value decomposition of the given matrix. Parameters ---------- matrix : ndarray The matrix should be at most 2-D. A 1-D array with length k will be treated as a 2-D with shape (1, k) atol : float The absolute tolerance for a zero singular value. Singular values smaller than ``atol`` are considered to be zero. rtol : float The relative tolerance for a zero singular value. Singular values less than the relative tolerance times the largest singular value are considered to be zero. Notes ----- If both `atol` and `rtol` are positive, the combined tolerance is the maximum of the two; that is:: tol = max(atol, rtol * smax) Singular values smaller than ``tol`` are considered to be zero. Returns ------- ndarray If ``matrix`` is an array with shape (m, k), then the returned nullspace will be an array with shape ``(k, n)``, where n is the estimated dimension of the nullspace. References ---------- Adapted from: https://scipy.github.io/old-wiki/pages/Cookbook/RankNullspace.html """ matrix = np.atleast_2d(matrix) _, sigma, vh = svd(matrix) tol = max(atol, rtol * sigma[0]) num_nonzero = (sigma >= tol).sum() return vh[num_nonzero:].conj().T
python
def nullspace(matrix, atol=1e-13, rtol=0.0): # noqa: D402 """ Compute an approximate basis for the null space (kernel) of a matrix. The algorithm used by this function is based on the singular value decomposition of the given matrix. Parameters ---------- matrix : ndarray The matrix should be at most 2-D. A 1-D array with length k will be treated as a 2-D with shape (1, k) atol : float The absolute tolerance for a zero singular value. Singular values smaller than ``atol`` are considered to be zero. rtol : float The relative tolerance for a zero singular value. Singular values less than the relative tolerance times the largest singular value are considered to be zero. Notes ----- If both `atol` and `rtol` are positive, the combined tolerance is the maximum of the two; that is:: tol = max(atol, rtol * smax) Singular values smaller than ``tol`` are considered to be zero. Returns ------- ndarray If ``matrix`` is an array with shape (m, k), then the returned nullspace will be an array with shape ``(k, n)``, where n is the estimated dimension of the nullspace. References ---------- Adapted from: https://scipy.github.io/old-wiki/pages/Cookbook/RankNullspace.html """ matrix = np.atleast_2d(matrix) _, sigma, vh = svd(matrix) tol = max(atol, rtol * sigma[0]) num_nonzero = (sigma >= tol).sum() return vh[num_nonzero:].conj().T
[ "def", "nullspace", "(", "matrix", ",", "atol", "=", "1e-13", ",", "rtol", "=", "0.0", ")", ":", "# noqa: D402", "matrix", "=", "np", ".", "atleast_2d", "(", "matrix", ")", "_", ",", "sigma", ",", "vh", "=", "svd", "(", "matrix", ")", "tol", "=", "max", "(", "atol", ",", "rtol", "*", "sigma", "[", "0", "]", ")", "num_nonzero", "=", "(", "sigma", ">=", "tol", ")", ".", "sum", "(", ")", "return", "vh", "[", "num_nonzero", ":", "]", ".", "conj", "(", ")", ".", "T" ]
Compute an approximate basis for the null space (kernel) of a matrix. The algorithm used by this function is based on the singular value decomposition of the given matrix. Parameters ---------- matrix : ndarray The matrix should be at most 2-D. A 1-D array with length k will be treated as a 2-D with shape (1, k) atol : float The absolute tolerance for a zero singular value. Singular values smaller than ``atol`` are considered to be zero. rtol : float The relative tolerance for a zero singular value. Singular values less than the relative tolerance times the largest singular value are considered to be zero. Notes ----- If both `atol` and `rtol` are positive, the combined tolerance is the maximum of the two; that is:: tol = max(atol, rtol * smax) Singular values smaller than ``tol`` are considered to be zero. Returns ------- ndarray If ``matrix`` is an array with shape (m, k), then the returned nullspace will be an array with shape ``(k, n)``, where n is the estimated dimension of the nullspace. References ---------- Adapted from: https://scipy.github.io/old-wiki/pages/Cookbook/RankNullspace.html
[ "Compute", "an", "approximate", "basis", "for", "the", "null", "space", "(", "kernel", ")", "of", "a", "matrix", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/consistency_helpers.py#L147-L193
opencobra/memote
memote/support/consistency_helpers.py
get_interface
def get_interface(model): """ Return the interface specific classes. Parameters ---------- model : cobra.Model The metabolic model under investigation. """ return ( model.solver.interface.Model, model.solver.interface.Constraint, model.solver.interface.Variable, model.solver.interface.Objective )
python
def get_interface(model): """ Return the interface specific classes. Parameters ---------- model : cobra.Model The metabolic model under investigation. """ return ( model.solver.interface.Model, model.solver.interface.Constraint, model.solver.interface.Variable, model.solver.interface.Objective )
[ "def", "get_interface", "(", "model", ")", ":", "return", "(", "model", ".", "solver", ".", "interface", ".", "Model", ",", "model", ".", "solver", ".", "interface", ".", "Constraint", ",", "model", ".", "solver", ".", "interface", ".", "Variable", ",", "model", ".", "solver", ".", "interface", ".", "Objective", ")" ]
Return the interface specific classes. Parameters ---------- model : cobra.Model The metabolic model under investigation.
[ "Return", "the", "interface", "specific", "classes", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/consistency_helpers.py#L197-L212
opencobra/memote
memote/support/consistency_helpers.py
get_internals
def get_internals(model): """ Return non-boundary reactions and their metabolites. Boundary reactions are unbalanced by their nature. They are excluded here and only the metabolites of the others are considered. Parameters ---------- model : cobra.Model The metabolic model under investigation. """ biomass = set(find_biomass_reaction(model)) if len(biomass) == 0: LOGGER.warning("No biomass reaction detected. Consistency test results " "are unreliable if one exists.") return set(model.reactions) - (set(model.boundary) | biomass)
python
def get_internals(model): """ Return non-boundary reactions and their metabolites. Boundary reactions are unbalanced by their nature. They are excluded here and only the metabolites of the others are considered. Parameters ---------- model : cobra.Model The metabolic model under investigation. """ biomass = set(find_biomass_reaction(model)) if len(biomass) == 0: LOGGER.warning("No biomass reaction detected. Consistency test results " "are unreliable if one exists.") return set(model.reactions) - (set(model.boundary) | biomass)
[ "def", "get_internals", "(", "model", ")", ":", "biomass", "=", "set", "(", "find_biomass_reaction", "(", "model", ")", ")", "if", "len", "(", "biomass", ")", "==", "0", ":", "LOGGER", ".", "warning", "(", "\"No biomass reaction detected. Consistency test results \"", "\"are unreliable if one exists.\"", ")", "return", "set", "(", "model", ".", "reactions", ")", "-", "(", "set", "(", "model", ".", "boundary", ")", "|", "biomass", ")" ]
Return non-boundary reactions and their metabolites. Boundary reactions are unbalanced by their nature. They are excluded here and only the metabolites of the others are considered. Parameters ---------- model : cobra.Model The metabolic model under investigation.
[ "Return", "non", "-", "boundary", "reactions", "and", "their", "metabolites", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/consistency_helpers.py#L216-L233
opencobra/memote
memote/support/consistency_helpers.py
create_milp_problem
def create_milp_problem(kernel, metabolites, Model, Variable, Constraint, Objective): """ Create the MILP as defined by equation (13) in [1]_. Parameters ---------- kernel : numpy.array A 2-dimensional array that represents the left nullspace of the stoichiometric matrix which is the nullspace of the transpose of the stoichiometric matrix. metabolites : iterable The metabolites in the nullspace. The length of this vector must equal the first dimension of the nullspace. Model : optlang.Model Model class for a specific optlang interface. Variable : optlang.Variable Variable class for a specific optlang interface. Constraint : optlang.Constraint Constraint class for a specific optlang interface. Objective : optlang.Objective Objective class for a specific optlang interface. References ---------- .. [1] Gevorgyan, A., M. G Poolman, and D. A Fell. "Detection of Stoichiometric Inconsistencies in Biomolecular Models." Bioinformatics 24, no. 19 (2008): 2245. """ assert len(metabolites) == kernel.shape[0],\ "metabolite vector and first nullspace dimension must be equal" ns_problem = Model() k_vars = list() for met in metabolites: # The element y[i] of the mass vector. y_var = Variable(met.id) k_var = Variable("k_{}".format(met.id), type="binary") k_vars.append(k_var) ns_problem.add([y_var, k_var]) # This constraint is equivalent to 0 <= y[i] <= k[i]. ns_problem.add(Constraint( y_var - k_var, ub=0, name="switch_{}".format(met.id))) ns_problem.update() # add nullspace constraints for (j, column) in enumerate(kernel.T): expression = sympy.Add( *[coef * ns_problem.variables[met.id] for (met, coef) in zip(metabolites, column) if coef != 0.0]) constraint = Constraint(expression, lb=0, ub=0, name="ns_{}".format(j)) ns_problem.add(constraint) # The objective is to minimize the binary indicators k[i], subject to # the above inequality constraints. ns_problem.objective = Objective(1) ns_problem.objective.set_linear_coefficients( {k_var: 1. 
for k_var in k_vars}) ns_problem.objective.direction = "min" return ns_problem, k_vars
python
def create_milp_problem(kernel, metabolites, Model, Variable, Constraint, Objective): """ Create the MILP as defined by equation (13) in [1]_. Parameters ---------- kernel : numpy.array A 2-dimensional array that represents the left nullspace of the stoichiometric matrix which is the nullspace of the transpose of the stoichiometric matrix. metabolites : iterable The metabolites in the nullspace. The length of this vector must equal the first dimension of the nullspace. Model : optlang.Model Model class for a specific optlang interface. Variable : optlang.Variable Variable class for a specific optlang interface. Constraint : optlang.Constraint Constraint class for a specific optlang interface. Objective : optlang.Objective Objective class for a specific optlang interface. References ---------- .. [1] Gevorgyan, A., M. G Poolman, and D. A Fell. "Detection of Stoichiometric Inconsistencies in Biomolecular Models." Bioinformatics 24, no. 19 (2008): 2245. """ assert len(metabolites) == kernel.shape[0],\ "metabolite vector and first nullspace dimension must be equal" ns_problem = Model() k_vars = list() for met in metabolites: # The element y[i] of the mass vector. y_var = Variable(met.id) k_var = Variable("k_{}".format(met.id), type="binary") k_vars.append(k_var) ns_problem.add([y_var, k_var]) # This constraint is equivalent to 0 <= y[i] <= k[i]. ns_problem.add(Constraint( y_var - k_var, ub=0, name="switch_{}".format(met.id))) ns_problem.update() # add nullspace constraints for (j, column) in enumerate(kernel.T): expression = sympy.Add( *[coef * ns_problem.variables[met.id] for (met, coef) in zip(metabolites, column) if coef != 0.0]) constraint = Constraint(expression, lb=0, ub=0, name="ns_{}".format(j)) ns_problem.add(constraint) # The objective is to minimize the binary indicators k[i], subject to # the above inequality constraints. ns_problem.objective = Objective(1) ns_problem.objective.set_linear_coefficients( {k_var: 1. 
for k_var in k_vars}) ns_problem.objective.direction = "min" return ns_problem, k_vars
[ "def", "create_milp_problem", "(", "kernel", ",", "metabolites", ",", "Model", ",", "Variable", ",", "Constraint", ",", "Objective", ")", ":", "assert", "len", "(", "metabolites", ")", "==", "kernel", ".", "shape", "[", "0", "]", ",", "\"metabolite vector and first nullspace dimension must be equal\"", "ns_problem", "=", "Model", "(", ")", "k_vars", "=", "list", "(", ")", "for", "met", "in", "metabolites", ":", "# The element y[i] of the mass vector.", "y_var", "=", "Variable", "(", "met", ".", "id", ")", "k_var", "=", "Variable", "(", "\"k_{}\"", ".", "format", "(", "met", ".", "id", ")", ",", "type", "=", "\"binary\"", ")", "k_vars", ".", "append", "(", "k_var", ")", "ns_problem", ".", "add", "(", "[", "y_var", ",", "k_var", "]", ")", "# This constraint is equivalent to 0 <= y[i] <= k[i].", "ns_problem", ".", "add", "(", "Constraint", "(", "y_var", "-", "k_var", ",", "ub", "=", "0", ",", "name", "=", "\"switch_{}\"", ".", "format", "(", "met", ".", "id", ")", ")", ")", "ns_problem", ".", "update", "(", ")", "# add nullspace constraints", "for", "(", "j", ",", "column", ")", "in", "enumerate", "(", "kernel", ".", "T", ")", ":", "expression", "=", "sympy", ".", "Add", "(", "*", "[", "coef", "*", "ns_problem", ".", "variables", "[", "met", ".", "id", "]", "for", "(", "met", ",", "coef", ")", "in", "zip", "(", "metabolites", ",", "column", ")", "if", "coef", "!=", "0.0", "]", ")", "constraint", "=", "Constraint", "(", "expression", ",", "lb", "=", "0", ",", "ub", "=", "0", ",", "name", "=", "\"ns_{}\"", ".", "format", "(", "j", ")", ")", "ns_problem", ".", "add", "(", "constraint", ")", "# The objective is to minimize the binary indicators k[i], subject to", "# the above inequality constraints.", "ns_problem", ".", "objective", "=", "Objective", "(", "1", ")", "ns_problem", ".", "objective", ".", "set_linear_coefficients", "(", "{", "k_var", ":", "1.", "for", "k_var", "in", "k_vars", "}", ")", "ns_problem", ".", "objective", ".", "direction", "=", "\"min\"", 
"return", "ns_problem", ",", "k_vars" ]
Create the MILP as defined by equation (13) in [1]_. Parameters ---------- kernel : numpy.array A 2-dimensional array that represents the left nullspace of the stoichiometric matrix which is the nullspace of the transpose of the stoichiometric matrix. metabolites : iterable The metabolites in the nullspace. The length of this vector must equal the first dimension of the nullspace. Model : optlang.Model Model class for a specific optlang interface. Variable : optlang.Variable Variable class for a specific optlang interface. Constraint : optlang.Constraint Constraint class for a specific optlang interface. Objective : optlang.Objective Objective class for a specific optlang interface. References ---------- .. [1] Gevorgyan, A., M. G Poolman, and D. A Fell. "Detection of Stoichiometric Inconsistencies in Biomolecular Models." Bioinformatics 24, no. 19 (2008): 2245.
[ "Create", "the", "MILP", "as", "defined", "by", "equation", "(", "13", ")", "in", "[", "1", "]", "_", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/consistency_helpers.py#L236-L295
opencobra/memote
memote/support/consistency_helpers.py
add_cut
def add_cut(problem, indicators, bound, Constraint): """ Add an integer cut to the problem. Ensure that the same solution involving these indicator variables cannot be found by enforcing their sum to be less than before. Parameters ---------- problem : optlang.Model Specific optlang interface Model instance. indicators : iterable Binary indicator `optlang.Variable`s. bound : int Should be one less than the sum of indicators. Corresponds to P - 1 in equation (14) in [1]_. Constraint : optlang.Constraint Constraint class for a specific optlang interface. References ---------- .. [1] Gevorgyan, A., M. G Poolman, and D. A Fell. "Detection of Stoichiometric Inconsistencies in Biomolecular Models." Bioinformatics 24, no. 19 (2008): 2245. """ cut = Constraint(sympy.Add(*indicators), ub=bound) problem.add(cut) return cut
python
def add_cut(problem, indicators, bound, Constraint): """ Add an integer cut to the problem. Ensure that the same solution involving these indicator variables cannot be found by enforcing their sum to be less than before. Parameters ---------- problem : optlang.Model Specific optlang interface Model instance. indicators : iterable Binary indicator `optlang.Variable`s. bound : int Should be one less than the sum of indicators. Corresponds to P - 1 in equation (14) in [1]_. Constraint : optlang.Constraint Constraint class for a specific optlang interface. References ---------- .. [1] Gevorgyan, A., M. G Poolman, and D. A Fell. "Detection of Stoichiometric Inconsistencies in Biomolecular Models." Bioinformatics 24, no. 19 (2008): 2245. """ cut = Constraint(sympy.Add(*indicators), ub=bound) problem.add(cut) return cut
[ "def", "add_cut", "(", "problem", ",", "indicators", ",", "bound", ",", "Constraint", ")", ":", "cut", "=", "Constraint", "(", "sympy", ".", "Add", "(", "*", "indicators", ")", ",", "ub", "=", "bound", ")", "problem", ".", "add", "(", "cut", ")", "return", "cut" ]
Add an integer cut to the problem. Ensure that the same solution involving these indicator variables cannot be found by enforcing their sum to be less than before. Parameters ---------- problem : optlang.Model Specific optlang interface Model instance. indicators : iterable Binary indicator `optlang.Variable`s. bound : int Should be one less than the sum of indicators. Corresponds to P - 1 in equation (14) in [1]_. Constraint : optlang.Constraint Constraint class for a specific optlang interface. References ---------- .. [1] Gevorgyan, A., M. G Poolman, and D. A Fell. "Detection of Stoichiometric Inconsistencies in Biomolecular Models." Bioinformatics 24, no. 19 (2008): 2245.
[ "Add", "an", "integer", "cut", "to", "the", "problem", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/consistency_helpers.py#L298-L327
opencobra/memote
memote/support/consistency_helpers.py
is_mass_balanced
def is_mass_balanced(reaction): """Confirm that a reaction is mass balanced.""" balance = defaultdict(int) for metabolite, coefficient in iteritems(reaction.metabolites): if metabolite.elements is None or len(metabolite.elements) == 0: return False for element, amount in iteritems(metabolite.elements): balance[element] += coefficient * amount return all(amount == 0 for amount in itervalues(balance))
python
def is_mass_balanced(reaction): """Confirm that a reaction is mass balanced.""" balance = defaultdict(int) for metabolite, coefficient in iteritems(reaction.metabolites): if metabolite.elements is None or len(metabolite.elements) == 0: return False for element, amount in iteritems(metabolite.elements): balance[element] += coefficient * amount return all(amount == 0 for amount in itervalues(balance))
[ "def", "is_mass_balanced", "(", "reaction", ")", ":", "balance", "=", "defaultdict", "(", "int", ")", "for", "metabolite", ",", "coefficient", "in", "iteritems", "(", "reaction", ".", "metabolites", ")", ":", "if", "metabolite", ".", "elements", "is", "None", "or", "len", "(", "metabolite", ".", "elements", ")", "==", "0", ":", "return", "False", "for", "element", ",", "amount", "in", "iteritems", "(", "metabolite", ".", "elements", ")", ":", "balance", "[", "element", "]", "+=", "coefficient", "*", "amount", "return", "all", "(", "amount", "==", "0", "for", "amount", "in", "itervalues", "(", "balance", ")", ")" ]
Confirm that a reaction is mass balanced.
[ "Confirm", "that", "a", "reaction", "is", "mass", "balanced", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/consistency_helpers.py#L330-L338
opencobra/memote
memote/support/consistency_helpers.py
is_charge_balanced
def is_charge_balanced(reaction): """Confirm that a reaction is charge balanced.""" charge = 0 for metabolite, coefficient in iteritems(reaction.metabolites): if metabolite.charge is None: return False charge += coefficient * metabolite.charge return charge == 0
python
def is_charge_balanced(reaction): """Confirm that a reaction is charge balanced.""" charge = 0 for metabolite, coefficient in iteritems(reaction.metabolites): if metabolite.charge is None: return False charge += coefficient * metabolite.charge return charge == 0
[ "def", "is_charge_balanced", "(", "reaction", ")", ":", "charge", "=", "0", "for", "metabolite", ",", "coefficient", "in", "iteritems", "(", "reaction", ".", "metabolites", ")", ":", "if", "metabolite", ".", "charge", "is", "None", ":", "return", "False", "charge", "+=", "coefficient", "*", "metabolite", ".", "charge", "return", "charge", "==", "0" ]
Confirm that a reaction is charge balanced.
[ "Confirm", "that", "a", "reaction", "is", "charge", "balanced", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/consistency_helpers.py#L341-L348
opencobra/memote
memote/experimental/checks.py
check_partial
def check_partial(func, *args, **kwargs): """Create a partial to be used by goodtables.""" new_func = partial(func, *args, **kwargs) new_func.check = func.check return new_func
python
def check_partial(func, *args, **kwargs): """Create a partial to be used by goodtables.""" new_func = partial(func, *args, **kwargs) new_func.check = func.check return new_func
[ "def", "check_partial", "(", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "new_func", "=", "partial", "(", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", "new_func", ".", "check", "=", "func", ".", "check", "return", "new_func" ]
Create a partial to be used by goodtables.
[ "Create", "a", "partial", "to", "be", "used", "by", "goodtables", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/experimental/checks.py#L27-L31
opencobra/memote
memote/experimental/checks.py
gene_id_check
def gene_id_check(genes, errors, columns, row_number): """ Validate gene identifiers against a known set. Parameters ---------- genes : set The known set of gene identifiers. errors : Passed by goodtables. columns : Passed by goodtables. row_number : Passed by goodtables. """ message = ("Gene '{value}' in column {col} and row {row} does not " "appear in the metabolic model.") for column in columns: if "gene" in column['header'] and column['value'] not in genes: message = message.format( value=column['value'], row=row_number, col=column['number']) errors.append({ 'code': 'bad-value', 'message': message, 'row-number': row_number, 'column-number': column['number'], })
python
def gene_id_check(genes, errors, columns, row_number): """ Validate gene identifiers against a known set. Parameters ---------- genes : set The known set of gene identifiers. errors : Passed by goodtables. columns : Passed by goodtables. row_number : Passed by goodtables. """ message = ("Gene '{value}' in column {col} and row {row} does not " "appear in the metabolic model.") for column in columns: if "gene" in column['header'] and column['value'] not in genes: message = message.format( value=column['value'], row=row_number, col=column['number']) errors.append({ 'code': 'bad-value', 'message': message, 'row-number': row_number, 'column-number': column['number'], })
[ "def", "gene_id_check", "(", "genes", ",", "errors", ",", "columns", ",", "row_number", ")", ":", "message", "=", "(", "\"Gene '{value}' in column {col} and row {row} does not \"", "\"appear in the metabolic model.\"", ")", "for", "column", "in", "columns", ":", "if", "\"gene\"", "in", "column", "[", "'header'", "]", "and", "column", "[", "'value'", "]", "not", "in", "genes", ":", "message", "=", "message", ".", "format", "(", "value", "=", "column", "[", "'value'", "]", ",", "row", "=", "row_number", ",", "col", "=", "column", "[", "'number'", "]", ")", "errors", ".", "append", "(", "{", "'code'", ":", "'bad-value'", ",", "'message'", ":", "message", ",", "'row-number'", ":", "row_number", ",", "'column-number'", ":", "column", "[", "'number'", "]", ",", "}", ")" ]
Validate gene identifiers against a known set. Parameters ---------- genes : set The known set of gene identifiers. errors : Passed by goodtables. columns : Passed by goodtables. row_number : Passed by goodtables.
[ "Validate", "gene", "identifiers", "against", "a", "known", "set", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/experimental/checks.py#L35-L64
opencobra/memote
memote/experimental/checks.py
reaction_id_check
def reaction_id_check(reactions, errors, columns, row_number): """ Validate reactions identifiers against a known set. Parameters ---------- reactions : set The known set of reaction identifiers. errors : Passed by goodtables. columns : Passed by goodtables. row_number : Passed by goodtables. """ message = ("Reaction '{value}' in column {col} and row {row} does not " "appear in the metabolic model.") for column in columns: if "reaction" in column['header'] and column['value'] not in reactions: message = message.format( value=column['value'], row=row_number, col=column['number']) errors.append({ 'code': 'bad-value', 'message': message, 'row-number': row_number, 'column-number': column['number'], })
python
def reaction_id_check(reactions, errors, columns, row_number): """ Validate reactions identifiers against a known set. Parameters ---------- reactions : set The known set of reaction identifiers. errors : Passed by goodtables. columns : Passed by goodtables. row_number : Passed by goodtables. """ message = ("Reaction '{value}' in column {col} and row {row} does not " "appear in the metabolic model.") for column in columns: if "reaction" in column['header'] and column['value'] not in reactions: message = message.format( value=column['value'], row=row_number, col=column['number']) errors.append({ 'code': 'bad-value', 'message': message, 'row-number': row_number, 'column-number': column['number'], })
[ "def", "reaction_id_check", "(", "reactions", ",", "errors", ",", "columns", ",", "row_number", ")", ":", "message", "=", "(", "\"Reaction '{value}' in column {col} and row {row} does not \"", "\"appear in the metabolic model.\"", ")", "for", "column", "in", "columns", ":", "if", "\"reaction\"", "in", "column", "[", "'header'", "]", "and", "column", "[", "'value'", "]", "not", "in", "reactions", ":", "message", "=", "message", ".", "format", "(", "value", "=", "column", "[", "'value'", "]", ",", "row", "=", "row_number", ",", "col", "=", "column", "[", "'number'", "]", ")", "errors", ".", "append", "(", "{", "'code'", ":", "'bad-value'", ",", "'message'", ":", "message", ",", "'row-number'", ":", "row_number", ",", "'column-number'", ":", "column", "[", "'number'", "]", ",", "}", ")" ]
Validate reactions identifiers against a known set. Parameters ---------- reactions : set The known set of reaction identifiers. errors : Passed by goodtables. columns : Passed by goodtables. row_number : Passed by goodtables.
[ "Validate", "reactions", "identifiers", "against", "a", "known", "set", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/experimental/checks.py#L68-L97
opencobra/memote
memote/experimental/checks.py
metabolite_id_check
def metabolite_id_check(metabolites, errors, columns, row_number): """ Validate metabolite identifiers against a known set. Parameters ---------- metabolites : set The known set of metabolite identifiers. errors : Passed by goodtables. columns : Passed by goodtables. row_number : Passed by goodtables. """ message = ("Metabolite '{value}' in column {col} and row {row} does not " "appear in the metabolic model.") for column in columns: if "metabolite" in column['header'] and \ column['value'] not in metabolites: message = message.format( value=column['value'], row=row_number, col=column['number']) errors.append({ 'code': 'bad-value', 'message': message, 'row-number': row_number, 'column-number': column['number'], })
python
def metabolite_id_check(metabolites, errors, columns, row_number): """ Validate metabolite identifiers against a known set. Parameters ---------- metabolites : set The known set of metabolite identifiers. errors : Passed by goodtables. columns : Passed by goodtables. row_number : Passed by goodtables. """ message = ("Metabolite '{value}' in column {col} and row {row} does not " "appear in the metabolic model.") for column in columns: if "metabolite" in column['header'] and \ column['value'] not in metabolites: message = message.format( value=column['value'], row=row_number, col=column['number']) errors.append({ 'code': 'bad-value', 'message': message, 'row-number': row_number, 'column-number': column['number'], })
[ "def", "metabolite_id_check", "(", "metabolites", ",", "errors", ",", "columns", ",", "row_number", ")", ":", "message", "=", "(", "\"Metabolite '{value}' in column {col} and row {row} does not \"", "\"appear in the metabolic model.\"", ")", "for", "column", "in", "columns", ":", "if", "\"metabolite\"", "in", "column", "[", "'header'", "]", "and", "column", "[", "'value'", "]", "not", "in", "metabolites", ":", "message", "=", "message", ".", "format", "(", "value", "=", "column", "[", "'value'", "]", ",", "row", "=", "row_number", ",", "col", "=", "column", "[", "'number'", "]", ")", "errors", ".", "append", "(", "{", "'code'", ":", "'bad-value'", ",", "'message'", ":", "message", ",", "'row-number'", ":", "row_number", ",", "'column-number'", ":", "column", "[", "'number'", "]", ",", "}", ")" ]
Validate metabolite identifiers against a known set. Parameters ---------- metabolites : set The known set of metabolite identifiers. errors : Passed by goodtables. columns : Passed by goodtables. row_number : Passed by goodtables.
[ "Validate", "metabolite", "identifiers", "against", "a", "known", "set", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/experimental/checks.py#L101-L131
opencobra/memote
memote/suite/cli/runner.py
run
def run(model, collect, filename, location, ignore_git, pytest_args, exclusive, skip, solver, experimental, custom_tests, deployment, skip_unchanged): """ Run the test suite on a single model and collect results. MODEL: Path to model file. Can also be supplied via the environment variable MEMOTE_MODEL or configured in 'setup.cfg' or 'memote.ini'. """ def is_verbose(arg): return (arg.startswith("--verbosity") or arg.startswith("-v") or arg.startswith("--verbose") or arg.startswith("-q") or arg.startswith("--quiet")) if ignore_git: repo = None else: callbacks.git_installed() repo = callbacks.probe_git() if collect: if repo is not None: if location is None: LOGGER.critical( "Working with a repository requires a storage location.") sys.exit(1) if not any(a.startswith("--tb") for a in pytest_args): pytest_args = ["--tb", "short"] + pytest_args if not any(is_verbose(a) for a in pytest_args): pytest_args.append("-vv") # Check if the model was changed in this commit. Exit `memote run` if this # was not the case. if skip_unchanged and repo is not None: commit = repo.head.commit if not is_modified(model, commit): LOGGER.info("The model was not modified in commit '%s'. Skipping.", commit.hexsha) sys.exit(0) # Add further directories to search for tests. pytest_args.extend(custom_tests) # Check if the model can be loaded at all. model, sbml_ver, notifications = api.validate_model(model) if model is None: LOGGER.critical( "The model could not be loaded due to the following SBML errors.") stdout_notifications(notifications) sys.exit(1) model.solver = solver # Load the experimental configuration using model information. 
if experimental is not None: experimental.load(model) code, result = api.test_model( model=model, sbml_version=sbml_ver, results=True, pytest_args=pytest_args, skip=skip, exclusive=exclusive, experimental=experimental) if collect: if repo is None: manager = ResultManager() manager.store(result, filename=filename) else: LOGGER.info("Checking out deployment branch.") # If the repo HEAD is pointing to the most recent branch then # GitPython's `repo.active_branch` works. Yet, if the repo is in # detached HEAD state, i.e., when a user has checked out a specific # commit as opposed to a branch, this won't work and throw a # `TypeError`, which we are circumventing below. try: previous = repo.active_branch previous_cmt = previous.commit is_branch = True except TypeError: previous_cmt = repo.head.commit is_branch = False repo.git.checkout(deployment) try: manager = SQLResultManager(repository=repo, location=location) except (AttributeError, ArgumentError): manager = RepoResultManager(repository=repo, location=location) LOGGER.info( "Committing result and changing back to working branch.") manager.store(result, commit=previous_cmt.hexsha) repo.git.add(".") check_call( ['git', 'commit', '-m', "chore: add result for {}".format(previous_cmt.hexsha)] ) if is_branch: previous.checkout() else: repo.commit(previous_cmt)
python
def run(model, collect, filename, location, ignore_git, pytest_args, exclusive, skip, solver, experimental, custom_tests, deployment, skip_unchanged): """ Run the test suite on a single model and collect results. MODEL: Path to model file. Can also be supplied via the environment variable MEMOTE_MODEL or configured in 'setup.cfg' or 'memote.ini'. """ def is_verbose(arg): return (arg.startswith("--verbosity") or arg.startswith("-v") or arg.startswith("--verbose") or arg.startswith("-q") or arg.startswith("--quiet")) if ignore_git: repo = None else: callbacks.git_installed() repo = callbacks.probe_git() if collect: if repo is not None: if location is None: LOGGER.critical( "Working with a repository requires a storage location.") sys.exit(1) if not any(a.startswith("--tb") for a in pytest_args): pytest_args = ["--tb", "short"] + pytest_args if not any(is_verbose(a) for a in pytest_args): pytest_args.append("-vv") # Check if the model was changed in this commit. Exit `memote run` if this # was not the case. if skip_unchanged and repo is not None: commit = repo.head.commit if not is_modified(model, commit): LOGGER.info("The model was not modified in commit '%s'. Skipping.", commit.hexsha) sys.exit(0) # Add further directories to search for tests. pytest_args.extend(custom_tests) # Check if the model can be loaded at all. model, sbml_ver, notifications = api.validate_model(model) if model is None: LOGGER.critical( "The model could not be loaded due to the following SBML errors.") stdout_notifications(notifications) sys.exit(1) model.solver = solver # Load the experimental configuration using model information. 
if experimental is not None: experimental.load(model) code, result = api.test_model( model=model, sbml_version=sbml_ver, results=True, pytest_args=pytest_args, skip=skip, exclusive=exclusive, experimental=experimental) if collect: if repo is None: manager = ResultManager() manager.store(result, filename=filename) else: LOGGER.info("Checking out deployment branch.") # If the repo HEAD is pointing to the most recent branch then # GitPython's `repo.active_branch` works. Yet, if the repo is in # detached HEAD state, i.e., when a user has checked out a specific # commit as opposed to a branch, this won't work and throw a # `TypeError`, which we are circumventing below. try: previous = repo.active_branch previous_cmt = previous.commit is_branch = True except TypeError: previous_cmt = repo.head.commit is_branch = False repo.git.checkout(deployment) try: manager = SQLResultManager(repository=repo, location=location) except (AttributeError, ArgumentError): manager = RepoResultManager(repository=repo, location=location) LOGGER.info( "Committing result and changing back to working branch.") manager.store(result, commit=previous_cmt.hexsha) repo.git.add(".") check_call( ['git', 'commit', '-m', "chore: add result for {}".format(previous_cmt.hexsha)] ) if is_branch: previous.checkout() else: repo.commit(previous_cmt)
[ "def", "run", "(", "model", ",", "collect", ",", "filename", ",", "location", ",", "ignore_git", ",", "pytest_args", ",", "exclusive", ",", "skip", ",", "solver", ",", "experimental", ",", "custom_tests", ",", "deployment", ",", "skip_unchanged", ")", ":", "def", "is_verbose", "(", "arg", ")", ":", "return", "(", "arg", ".", "startswith", "(", "\"--verbosity\"", ")", "or", "arg", ".", "startswith", "(", "\"-v\"", ")", "or", "arg", ".", "startswith", "(", "\"--verbose\"", ")", "or", "arg", ".", "startswith", "(", "\"-q\"", ")", "or", "arg", ".", "startswith", "(", "\"--quiet\"", ")", ")", "if", "ignore_git", ":", "repo", "=", "None", "else", ":", "callbacks", ".", "git_installed", "(", ")", "repo", "=", "callbacks", ".", "probe_git", "(", ")", "if", "collect", ":", "if", "repo", "is", "not", "None", ":", "if", "location", "is", "None", ":", "LOGGER", ".", "critical", "(", "\"Working with a repository requires a storage location.\"", ")", "sys", ".", "exit", "(", "1", ")", "if", "not", "any", "(", "a", ".", "startswith", "(", "\"--tb\"", ")", "for", "a", "in", "pytest_args", ")", ":", "pytest_args", "=", "[", "\"--tb\"", ",", "\"short\"", "]", "+", "pytest_args", "if", "not", "any", "(", "is_verbose", "(", "a", ")", "for", "a", "in", "pytest_args", ")", ":", "pytest_args", ".", "append", "(", "\"-vv\"", ")", "# Check if the model was changed in this commit. Exit `memote run` if this", "# was not the case.", "if", "skip_unchanged", "and", "repo", "is", "not", "None", ":", "commit", "=", "repo", ".", "head", ".", "commit", "if", "not", "is_modified", "(", "model", ",", "commit", ")", ":", "LOGGER", ".", "info", "(", "\"The model was not modified in commit '%s'. 
Skipping.\"", ",", "commit", ".", "hexsha", ")", "sys", ".", "exit", "(", "0", ")", "# Add further directories to search for tests.", "pytest_args", ".", "extend", "(", "custom_tests", ")", "# Check if the model can be loaded at all.", "model", ",", "sbml_ver", ",", "notifications", "=", "api", ".", "validate_model", "(", "model", ")", "if", "model", "is", "None", ":", "LOGGER", ".", "critical", "(", "\"The model could not be loaded due to the following SBML errors.\"", ")", "stdout_notifications", "(", "notifications", ")", "sys", ".", "exit", "(", "1", ")", "model", ".", "solver", "=", "solver", "# Load the experimental configuration using model information.", "if", "experimental", "is", "not", "None", ":", "experimental", ".", "load", "(", "model", ")", "code", ",", "result", "=", "api", ".", "test_model", "(", "model", "=", "model", ",", "sbml_version", "=", "sbml_ver", ",", "results", "=", "True", ",", "pytest_args", "=", "pytest_args", ",", "skip", "=", "skip", ",", "exclusive", "=", "exclusive", ",", "experimental", "=", "experimental", ")", "if", "collect", ":", "if", "repo", "is", "None", ":", "manager", "=", "ResultManager", "(", ")", "manager", ".", "store", "(", "result", ",", "filename", "=", "filename", ")", "else", ":", "LOGGER", ".", "info", "(", "\"Checking out deployment branch.\"", ")", "# If the repo HEAD is pointing to the most recent branch then", "# GitPython's `repo.active_branch` works. 
Yet, if the repo is in", "# detached HEAD state, i.e., when a user has checked out a specific", "# commit as opposed to a branch, this won't work and throw a", "# `TypeError`, which we are circumventing below.", "try", ":", "previous", "=", "repo", ".", "active_branch", "previous_cmt", "=", "previous", ".", "commit", "is_branch", "=", "True", "except", "TypeError", ":", "previous_cmt", "=", "repo", ".", "head", ".", "commit", "is_branch", "=", "False", "repo", ".", "git", ".", "checkout", "(", "deployment", ")", "try", ":", "manager", "=", "SQLResultManager", "(", "repository", "=", "repo", ",", "location", "=", "location", ")", "except", "(", "AttributeError", ",", "ArgumentError", ")", ":", "manager", "=", "RepoResultManager", "(", "repository", "=", "repo", ",", "location", "=", "location", ")", "LOGGER", ".", "info", "(", "\"Committing result and changing back to working branch.\"", ")", "manager", ".", "store", "(", "result", ",", "commit", "=", "previous_cmt", ".", "hexsha", ")", "repo", ".", "git", ".", "add", "(", "\".\"", ")", "check_call", "(", "[", "'git'", ",", "'commit'", ",", "'-m'", ",", "\"chore: add result for {}\"", ".", "format", "(", "previous_cmt", ".", "hexsha", ")", "]", ")", "if", "is_branch", ":", "previous", ".", "checkout", "(", ")", "else", ":", "repo", ".", "commit", "(", "previous_cmt", ")" ]
Run the test suite on a single model and collect results. MODEL: Path to model file. Can also be supplied via the environment variable MEMOTE_MODEL or configured in 'setup.cfg' or 'memote.ini'.
[ "Run", "the", "test", "suite", "on", "a", "single", "model", "and", "collect", "results", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/cli/runner.py#L133-L222
opencobra/memote
memote/suite/cli/runner.py
new
def new(directory, replay): """ Create a suitable model repository structure from a template. By using a cookiecutter template, memote will ask you a couple of questions and set up a new directory structure that will make your life easier. The new directory will be placed in the current directory or respect the given --directory option. """ callbacks.git_installed() if directory is None: directory = os.getcwd() cookiecutter("gh:opencobra/cookiecutter-memote", output_dir=directory, replay=replay)
python
def new(directory, replay): """ Create a suitable model repository structure from a template. By using a cookiecutter template, memote will ask you a couple of questions and set up a new directory structure that will make your life easier. The new directory will be placed in the current directory or respect the given --directory option. """ callbacks.git_installed() if directory is None: directory = os.getcwd() cookiecutter("gh:opencobra/cookiecutter-memote", output_dir=directory, replay=replay)
[ "def", "new", "(", "directory", ",", "replay", ")", ":", "callbacks", ".", "git_installed", "(", ")", "if", "directory", "is", "None", ":", "directory", "=", "os", ".", "getcwd", "(", ")", "cookiecutter", "(", "\"gh:opencobra/cookiecutter-memote\"", ",", "output_dir", "=", "directory", ",", "replay", "=", "replay", ")" ]
Create a suitable model repository structure from a template. By using a cookiecutter template, memote will ask you a couple of questions and set up a new directory structure that will make your life easier. The new directory will be placed in the current directory or respect the given --directory option.
[ "Create", "a", "suitable", "model", "repository", "structure", "from", "a", "template", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/cli/runner.py#L236-L250
opencobra/memote
memote/suite/cli/runner.py
history
def history(model, message, rewrite, solver, location, pytest_args, deployment, commits, skip, exclusive, experimental=None): # noqa: D301 """ Re-compute test results for the git branch history. MODEL is the path to the model file. MESSAGE is a commit message in case results were modified or added. [COMMIT] ... It is possible to list out individual commits that should be re-computed or supply a range <oldest commit>..<newest commit>, for example, memote history model.xml "chore: re-compute history" 6b84d05..cd49c85 There are two distinct modes: \b 1. Completely re-compute test results for each commit in the git history. This should only be necessary when memote is first used with existing model repositories. 2. By giving memote specific commit hashes, it will re-compute test results for those only. This can also be achieved by supplying a commit range. """ # callbacks.validate_path(model) callbacks.git_installed() if location is None: raise click.BadParameter("No 'location' given or configured.") if "--tb" not in pytest_args: pytest_args = ["--tb", "no"] + pytest_args try: LOGGER.info("Identifying git repository!") repo = git.Repo() except git.InvalidGitRepositoryError: LOGGER.critical( "The history requires a git repository in order to follow " "the model's commit history.") sys.exit(1) else: LOGGER.info("Success!") previous = repo.active_branch LOGGER.info("Checking out deployment branch {}.".format(deployment)) repo.git.checkout(deployment) # Temporarily move the results to a new location so that they are # available while checking out the various commits. engine = None tmp_location = mkdtemp() try: # Test if the location can be opened as a database. engine = create_engine(location) engine.dispose() new_location = location if location.startswith("sqlite"): # Copy the SQLite database to a temporary location. Other # databases are not file-based and thus git independent. 
url = location.split("/", maxsplit=3) if isfile(url[3]): copy2(url[3], tmp_location) new_location = "{}/{}".format( "/".join(url[:3] + [tmp_location]), url[3]) LOGGER.info("Temporarily moving database from '%s' to '%s'.", url[3], join(tmp_location, url[3])) manager = SQLResultManager(repository=repo, location=new_location) except (AttributeError, ArgumentError): LOGGER.info("Temporarily moving results from '%s' to '%s'.", location, tmp_location) move(location, tmp_location) new_location = join(tmp_location, location) manager = RepoResultManager(repository=repo, location=new_location) LOGGER.info("Recomputing result history!") history = HistoryManager(repository=repo, manager=manager) history.load_history(model, skip={deployment}) if len(commits) == 0: commits = list(history.iter_commits()) elif len(commits) == 1 and ".." in commits[0]: commits = repo.git.rev_list(commits[0]).split(os.linesep) for commit in commits: cmt = repo.commit(commit) # Rewrite to full length hexsha. commit = cmt.hexsha if not is_modified(model, cmt): LOGGER.info( "The model was not modified in commit '{}'. " "Skipping.".format(commit)) continue # Should we overwrite an existing result? if commit in history and not rewrite: LOGGER.info( "Result for commit '{}' exists. Skipping.".format(commit)) continue LOGGER.info( "Running the test suite for commit '{}'.".format(commit)) blob = cmt.tree[model] model_obj, sbml_ver, notifications = _model_from_stream( blob.data_stream, blob.name ) if model_obj is None: LOGGER.critical("The model could not be loaded due to the " "following SBML errors.") stdout_notifications(notifications) continue proc = Process( target=_test_history, args=(model_obj, sbml_ver, solver, manager, commit, pytest_args, skip, exclusive, experimental)) proc.start() proc.join() LOGGER.info("Finished recomputing!") # Copy back all new and modified files and add them to the index. 
LOGGER.info("Committing recomputed results!") repo.git.checkout(deployment) if engine is not None: manager.session.close() if location.startswith("sqlite"): copy2(join(tmp_location, url[3]), url[3]) else: move(new_location, os.getcwd()) repo.git.add(".") check_call(['git', 'commit', '-m', message]) LOGGER.info("Success!") # Checkout the original branch. previous.checkout() LOGGER.info("Done.")
python
def history(model, message, rewrite, solver, location, pytest_args, deployment, commits, skip, exclusive, experimental=None): # noqa: D301 """ Re-compute test results for the git branch history. MODEL is the path to the model file. MESSAGE is a commit message in case results were modified or added. [COMMIT] ... It is possible to list out individual commits that should be re-computed or supply a range <oldest commit>..<newest commit>, for example, memote history model.xml "chore: re-compute history" 6b84d05..cd49c85 There are two distinct modes: \b 1. Completely re-compute test results for each commit in the git history. This should only be necessary when memote is first used with existing model repositories. 2. By giving memote specific commit hashes, it will re-compute test results for those only. This can also be achieved by supplying a commit range. """ # callbacks.validate_path(model) callbacks.git_installed() if location is None: raise click.BadParameter("No 'location' given or configured.") if "--tb" not in pytest_args: pytest_args = ["--tb", "no"] + pytest_args try: LOGGER.info("Identifying git repository!") repo = git.Repo() except git.InvalidGitRepositoryError: LOGGER.critical( "The history requires a git repository in order to follow " "the model's commit history.") sys.exit(1) else: LOGGER.info("Success!") previous = repo.active_branch LOGGER.info("Checking out deployment branch {}.".format(deployment)) repo.git.checkout(deployment) # Temporarily move the results to a new location so that they are # available while checking out the various commits. engine = None tmp_location = mkdtemp() try: # Test if the location can be opened as a database. engine = create_engine(location) engine.dispose() new_location = location if location.startswith("sqlite"): # Copy the SQLite database to a temporary location. Other # databases are not file-based and thus git independent. 
url = location.split("/", maxsplit=3) if isfile(url[3]): copy2(url[3], tmp_location) new_location = "{}/{}".format( "/".join(url[:3] + [tmp_location]), url[3]) LOGGER.info("Temporarily moving database from '%s' to '%s'.", url[3], join(tmp_location, url[3])) manager = SQLResultManager(repository=repo, location=new_location) except (AttributeError, ArgumentError): LOGGER.info("Temporarily moving results from '%s' to '%s'.", location, tmp_location) move(location, tmp_location) new_location = join(tmp_location, location) manager = RepoResultManager(repository=repo, location=new_location) LOGGER.info("Recomputing result history!") history = HistoryManager(repository=repo, manager=manager) history.load_history(model, skip={deployment}) if len(commits) == 0: commits = list(history.iter_commits()) elif len(commits) == 1 and ".." in commits[0]: commits = repo.git.rev_list(commits[0]).split(os.linesep) for commit in commits: cmt = repo.commit(commit) # Rewrite to full length hexsha. commit = cmt.hexsha if not is_modified(model, cmt): LOGGER.info( "The model was not modified in commit '{}'. " "Skipping.".format(commit)) continue # Should we overwrite an existing result? if commit in history and not rewrite: LOGGER.info( "Result for commit '{}' exists. Skipping.".format(commit)) continue LOGGER.info( "Running the test suite for commit '{}'.".format(commit)) blob = cmt.tree[model] model_obj, sbml_ver, notifications = _model_from_stream( blob.data_stream, blob.name ) if model_obj is None: LOGGER.critical("The model could not be loaded due to the " "following SBML errors.") stdout_notifications(notifications) continue proc = Process( target=_test_history, args=(model_obj, sbml_ver, solver, manager, commit, pytest_args, skip, exclusive, experimental)) proc.start() proc.join() LOGGER.info("Finished recomputing!") # Copy back all new and modified files and add them to the index. 
LOGGER.info("Committing recomputed results!") repo.git.checkout(deployment) if engine is not None: manager.session.close() if location.startswith("sqlite"): copy2(join(tmp_location, url[3]), url[3]) else: move(new_location, os.getcwd()) repo.git.add(".") check_call(['git', 'commit', '-m', message]) LOGGER.info("Success!") # Checkout the original branch. previous.checkout() LOGGER.info("Done.")
[ "def", "history", "(", "model", ",", "message", ",", "rewrite", ",", "solver", ",", "location", ",", "pytest_args", ",", "deployment", ",", "commits", ",", "skip", ",", "exclusive", ",", "experimental", "=", "None", ")", ":", "# noqa: D301", "# callbacks.validate_path(model)", "callbacks", ".", "git_installed", "(", ")", "if", "location", "is", "None", ":", "raise", "click", ".", "BadParameter", "(", "\"No 'location' given or configured.\"", ")", "if", "\"--tb\"", "not", "in", "pytest_args", ":", "pytest_args", "=", "[", "\"--tb\"", ",", "\"no\"", "]", "+", "pytest_args", "try", ":", "LOGGER", ".", "info", "(", "\"Identifying git repository!\"", ")", "repo", "=", "git", ".", "Repo", "(", ")", "except", "git", ".", "InvalidGitRepositoryError", ":", "LOGGER", ".", "critical", "(", "\"The history requires a git repository in order to follow \"", "\"the model's commit history.\"", ")", "sys", ".", "exit", "(", "1", ")", "else", ":", "LOGGER", ".", "info", "(", "\"Success!\"", ")", "previous", "=", "repo", ".", "active_branch", "LOGGER", ".", "info", "(", "\"Checking out deployment branch {}.\"", ".", "format", "(", "deployment", ")", ")", "repo", ".", "git", ".", "checkout", "(", "deployment", ")", "# Temporarily move the results to a new location so that they are", "# available while checking out the various commits.", "engine", "=", "None", "tmp_location", "=", "mkdtemp", "(", ")", "try", ":", "# Test if the location can be opened as a database.", "engine", "=", "create_engine", "(", "location", ")", "engine", ".", "dispose", "(", ")", "new_location", "=", "location", "if", "location", ".", "startswith", "(", "\"sqlite\"", ")", ":", "# Copy the SQLite database to a temporary location. 
Other", "# databases are not file-based and thus git independent.", "url", "=", "location", ".", "split", "(", "\"/\"", ",", "maxsplit", "=", "3", ")", "if", "isfile", "(", "url", "[", "3", "]", ")", ":", "copy2", "(", "url", "[", "3", "]", ",", "tmp_location", ")", "new_location", "=", "\"{}/{}\"", ".", "format", "(", "\"/\"", ".", "join", "(", "url", "[", ":", "3", "]", "+", "[", "tmp_location", "]", ")", ",", "url", "[", "3", "]", ")", "LOGGER", ".", "info", "(", "\"Temporarily moving database from '%s' to '%s'.\"", ",", "url", "[", "3", "]", ",", "join", "(", "tmp_location", ",", "url", "[", "3", "]", ")", ")", "manager", "=", "SQLResultManager", "(", "repository", "=", "repo", ",", "location", "=", "new_location", ")", "except", "(", "AttributeError", ",", "ArgumentError", ")", ":", "LOGGER", ".", "info", "(", "\"Temporarily moving results from '%s' to '%s'.\"", ",", "location", ",", "tmp_location", ")", "move", "(", "location", ",", "tmp_location", ")", "new_location", "=", "join", "(", "tmp_location", ",", "location", ")", "manager", "=", "RepoResultManager", "(", "repository", "=", "repo", ",", "location", "=", "new_location", ")", "LOGGER", ".", "info", "(", "\"Recomputing result history!\"", ")", "history", "=", "HistoryManager", "(", "repository", "=", "repo", ",", "manager", "=", "manager", ")", "history", ".", "load_history", "(", "model", ",", "skip", "=", "{", "deployment", "}", ")", "if", "len", "(", "commits", ")", "==", "0", ":", "commits", "=", "list", "(", "history", ".", "iter_commits", "(", ")", ")", "elif", "len", "(", "commits", ")", "==", "1", "and", "\"..\"", "in", "commits", "[", "0", "]", ":", "commits", "=", "repo", ".", "git", ".", "rev_list", "(", "commits", "[", "0", "]", ")", ".", "split", "(", "os", ".", "linesep", ")", "for", "commit", "in", "commits", ":", "cmt", "=", "repo", ".", "commit", "(", "commit", ")", "# Rewrite to full length hexsha.", "commit", "=", "cmt", ".", "hexsha", "if", "not", "is_modified", "(", "model", ",", 
"cmt", ")", ":", "LOGGER", ".", "info", "(", "\"The model was not modified in commit '{}'. \"", "\"Skipping.\"", ".", "format", "(", "commit", ")", ")", "continue", "# Should we overwrite an existing result?", "if", "commit", "in", "history", "and", "not", "rewrite", ":", "LOGGER", ".", "info", "(", "\"Result for commit '{}' exists. Skipping.\"", ".", "format", "(", "commit", ")", ")", "continue", "LOGGER", ".", "info", "(", "\"Running the test suite for commit '{}'.\"", ".", "format", "(", "commit", ")", ")", "blob", "=", "cmt", ".", "tree", "[", "model", "]", "model_obj", ",", "sbml_ver", ",", "notifications", "=", "_model_from_stream", "(", "blob", ".", "data_stream", ",", "blob", ".", "name", ")", "if", "model_obj", "is", "None", ":", "LOGGER", ".", "critical", "(", "\"The model could not be loaded due to the \"", "\"following SBML errors.\"", ")", "stdout_notifications", "(", "notifications", ")", "continue", "proc", "=", "Process", "(", "target", "=", "_test_history", ",", "args", "=", "(", "model_obj", ",", "sbml_ver", ",", "solver", ",", "manager", ",", "commit", ",", "pytest_args", ",", "skip", ",", "exclusive", ",", "experimental", ")", ")", "proc", ".", "start", "(", ")", "proc", ".", "join", "(", ")", "LOGGER", ".", "info", "(", "\"Finished recomputing!\"", ")", "# Copy back all new and modified files and add them to the index.", "LOGGER", ".", "info", "(", "\"Committing recomputed results!\"", ")", "repo", ".", "git", ".", "checkout", "(", "deployment", ")", "if", "engine", "is", "not", "None", ":", "manager", ".", "session", ".", "close", "(", ")", "if", "location", ".", "startswith", "(", "\"sqlite\"", ")", ":", "copy2", "(", "join", "(", "tmp_location", ",", "url", "[", "3", "]", ")", ",", "url", "[", "3", "]", ")", "else", ":", "move", "(", "new_location", ",", "os", ".", "getcwd", "(", ")", ")", "repo", ".", "git", ".", "add", "(", "\".\"", ")", "check_call", "(", "[", "'git'", ",", "'commit'", ",", "'-m'", ",", "message", "]", ")", "LOGGER", ".", 
"info", "(", "\"Success!\"", ")", "# Checkout the original branch.", "previous", ".", "checkout", "(", ")", "LOGGER", ".", "info", "(", "\"Done.\"", ")" ]
Re-compute test results for the git branch history. MODEL is the path to the model file. MESSAGE is a commit message in case results were modified or added. [COMMIT] ... It is possible to list out individual commits that should be re-computed or supply a range <oldest commit>..<newest commit>, for example, memote history model.xml "chore: re-compute history" 6b84d05..cd49c85 There are two distinct modes: \b 1. Completely re-compute test results for each commit in the git history. This should only be necessary when memote is first used with existing model repositories. 2. By giving memote specific commit hashes, it will re-compute test results for those only. This can also be achieved by supplying a commit range.
[ "Re", "-", "compute", "test", "results", "for", "the", "git", "branch", "history", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/cli/runner.py#L307-L429
opencobra/memote
memote/suite/cli/runner.py
online
def online(note, github_repository, github_username): """Upload the repository to GitHub and enable testing on Travis CI.""" callbacks.git_installed() try: repo = git.Repo() except git.InvalidGitRepositoryError: LOGGER.critical( "'memote online' requires a git repository in order to follow " "the current branch's commit history.") sys.exit(1) if note == "memote-ci access": note = "{} to {}".format(note, github_repository) # Github API calls # Set up the git repository on GitHub via API v3. gh_repo_name, auth_token, repo_access_token = _setup_gh_repo( github_repository, github_username, note ) # Travis API calls # Configure Travis CI to use Github auth token then return encrypted token. secret = _setup_travis_ci(gh_repo_name, auth_token, repo_access_token) # Save the encrypted token in the travis config then commit and push LOGGER.info("Storing GitHub token in '.travis.yml'.") config = te.load_travis_configuration(".travis.yml") global_env = config.setdefault("env", {}).get("global") if global_env is None: config["env"]["global"] = global_env = {} try: global_env["secure"] = secret except TypeError: global_env.append({"secure": secret}) te.dump_travis_configuration(config, ".travis.yml") LOGGER.info("Add, commit and push changes to '.travis.yml' to GitHub.") repo.index.add([".travis.yml"]) check_call( ['git', 'commit', '-m', "chore: add encrypted GitHub access token"] ) check_call( ['git', 'push', '--set-upstream', 'origin', repo.active_branch.name] )
python
def online(note, github_repository, github_username): """Upload the repository to GitHub and enable testing on Travis CI.""" callbacks.git_installed() try: repo = git.Repo() except git.InvalidGitRepositoryError: LOGGER.critical( "'memote online' requires a git repository in order to follow " "the current branch's commit history.") sys.exit(1) if note == "memote-ci access": note = "{} to {}".format(note, github_repository) # Github API calls # Set up the git repository on GitHub via API v3. gh_repo_name, auth_token, repo_access_token = _setup_gh_repo( github_repository, github_username, note ) # Travis API calls # Configure Travis CI to use Github auth token then return encrypted token. secret = _setup_travis_ci(gh_repo_name, auth_token, repo_access_token) # Save the encrypted token in the travis config then commit and push LOGGER.info("Storing GitHub token in '.travis.yml'.") config = te.load_travis_configuration(".travis.yml") global_env = config.setdefault("env", {}).get("global") if global_env is None: config["env"]["global"] = global_env = {} try: global_env["secure"] = secret except TypeError: global_env.append({"secure": secret}) te.dump_travis_configuration(config, ".travis.yml") LOGGER.info("Add, commit and push changes to '.travis.yml' to GitHub.") repo.index.add([".travis.yml"]) check_call( ['git', 'commit', '-m', "chore: add encrypted GitHub access token"] ) check_call( ['git', 'push', '--set-upstream', 'origin', repo.active_branch.name] )
[ "def", "online", "(", "note", ",", "github_repository", ",", "github_username", ")", ":", "callbacks", ".", "git_installed", "(", ")", "try", ":", "repo", "=", "git", ".", "Repo", "(", ")", "except", "git", ".", "InvalidGitRepositoryError", ":", "LOGGER", ".", "critical", "(", "\"'memote online' requires a git repository in order to follow \"", "\"the current branch's commit history.\"", ")", "sys", ".", "exit", "(", "1", ")", "if", "note", "==", "\"memote-ci access\"", ":", "note", "=", "\"{} to {}\"", ".", "format", "(", "note", ",", "github_repository", ")", "# Github API calls", "# Set up the git repository on GitHub via API v3.", "gh_repo_name", ",", "auth_token", ",", "repo_access_token", "=", "_setup_gh_repo", "(", "github_repository", ",", "github_username", ",", "note", ")", "# Travis API calls", "# Configure Travis CI to use Github auth token then return encrypted token.", "secret", "=", "_setup_travis_ci", "(", "gh_repo_name", ",", "auth_token", ",", "repo_access_token", ")", "# Save the encrypted token in the travis config then commit and push", "LOGGER", ".", "info", "(", "\"Storing GitHub token in '.travis.yml'.\"", ")", "config", "=", "te", ".", "load_travis_configuration", "(", "\".travis.yml\"", ")", "global_env", "=", "config", ".", "setdefault", "(", "\"env\"", ",", "{", "}", ")", ".", "get", "(", "\"global\"", ")", "if", "global_env", "is", "None", ":", "config", "[", "\"env\"", "]", "[", "\"global\"", "]", "=", "global_env", "=", "{", "}", "try", ":", "global_env", "[", "\"secure\"", "]", "=", "secret", "except", "TypeError", ":", "global_env", ".", "append", "(", "{", "\"secure\"", ":", "secret", "}", ")", "te", ".", "dump_travis_configuration", "(", "config", ",", "\".travis.yml\"", ")", "LOGGER", ".", "info", "(", "\"Add, commit and push changes to '.travis.yml' to GitHub.\"", ")", "repo", ".", "index", ".", "add", "(", "[", "\".travis.yml\"", "]", ")", "check_call", "(", "[", "'git'", ",", "'commit'", ",", "'-m'", ",", "\"chore: add 
encrypted GitHub access token\"", "]", ")", "check_call", "(", "[", "'git'", ",", "'push'", ",", "'--set-upstream'", ",", "'origin'", ",", "repo", ".", "active_branch", ".", "name", "]", ")" ]
Upload the repository to GitHub and enable testing on Travis CI.
[ "Upload", "the", "repository", "to", "GitHub", "and", "enable", "testing", "on", "Travis", "CI", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/cli/runner.py#L737-L780
opencobra/memote
scripts/update_mock_repo.py
update_mock_repo
def update_mock_repo(): """ Clone and gzip the memote-mock-repo used for CLI and integration tests. The repo is hosted at 'https://github.com/ChristianLieven/memote-mock-repo.git' and maintained separately from """ target_file = os.path.abspath( join("tests", "data", "memote-mock-repo.tar.gz") ) temp_dir = mkdtemp(prefix='tmp_mock') previous_wd = os.getcwd() try: LOGGER.info("Cloning repository.") os.chdir(temp_dir) check_output( ['git', 'clone', 'https://github.com/ChristianLieven/memote-mock-repo.git'] ) os.chdir('memote-mock-repo/') LOGGER.info("Setting git to ignore filemode changes.") call( ['git', 'config', 'core.fileMode', 'false'] ) call( ['git', 'config', 'user.email', '[email protected]'] ) call( ['git', 'config', 'user.name', 'memote-bot'] ) finally: LOGGER.info("Compressing to tarball.") tar = tarfile.open(target_file, "w:gz") tar.add( join(temp_dir, 'memote-mock-repo/'), arcname="memote-mock-repo" ) tar.close() LOGGER.info("Success!") LOGGER.info("Removing temporary directory.") rmtree(temp_dir) LOGGER.info("Success! The mock repo has been updated.") os.chdir(previous_wd)
python
def update_mock_repo(): """ Clone and gzip the memote-mock-repo used for CLI and integration tests. The repo is hosted at 'https://github.com/ChristianLieven/memote-mock-repo.git' and maintained separately from """ target_file = os.path.abspath( join("tests", "data", "memote-mock-repo.tar.gz") ) temp_dir = mkdtemp(prefix='tmp_mock') previous_wd = os.getcwd() try: LOGGER.info("Cloning repository.") os.chdir(temp_dir) check_output( ['git', 'clone', 'https://github.com/ChristianLieven/memote-mock-repo.git'] ) os.chdir('memote-mock-repo/') LOGGER.info("Setting git to ignore filemode changes.") call( ['git', 'config', 'core.fileMode', 'false'] ) call( ['git', 'config', 'user.email', '[email protected]'] ) call( ['git', 'config', 'user.name', 'memote-bot'] ) finally: LOGGER.info("Compressing to tarball.") tar = tarfile.open(target_file, "w:gz") tar.add( join(temp_dir, 'memote-mock-repo/'), arcname="memote-mock-repo" ) tar.close() LOGGER.info("Success!") LOGGER.info("Removing temporary directory.") rmtree(temp_dir) LOGGER.info("Success! The mock repo has been updated.") os.chdir(previous_wd)
[ "def", "update_mock_repo", "(", ")", ":", "target_file", "=", "os", ".", "path", ".", "abspath", "(", "join", "(", "\"tests\"", ",", "\"data\"", ",", "\"memote-mock-repo.tar.gz\"", ")", ")", "temp_dir", "=", "mkdtemp", "(", "prefix", "=", "'tmp_mock'", ")", "previous_wd", "=", "os", ".", "getcwd", "(", ")", "try", ":", "LOGGER", ".", "info", "(", "\"Cloning repository.\"", ")", "os", ".", "chdir", "(", "temp_dir", ")", "check_output", "(", "[", "'git'", ",", "'clone'", ",", "'https://github.com/ChristianLieven/memote-mock-repo.git'", "]", ")", "os", ".", "chdir", "(", "'memote-mock-repo/'", ")", "LOGGER", ".", "info", "(", "\"Setting git to ignore filemode changes.\"", ")", "call", "(", "[", "'git'", ",", "'config'", ",", "'core.fileMode'", ",", "'false'", "]", ")", "call", "(", "[", "'git'", ",", "'config'", ",", "'user.email'", ",", "'[email protected]'", "]", ")", "call", "(", "[", "'git'", ",", "'config'", ",", "'user.name'", ",", "'memote-bot'", "]", ")", "finally", ":", "LOGGER", ".", "info", "(", "\"Compressing to tarball.\"", ")", "tar", "=", "tarfile", ".", "open", "(", "target_file", ",", "\"w:gz\"", ")", "tar", ".", "add", "(", "join", "(", "temp_dir", ",", "'memote-mock-repo/'", ")", ",", "arcname", "=", "\"memote-mock-repo\"", ")", "tar", ".", "close", "(", ")", "LOGGER", ".", "info", "(", "\"Success!\"", ")", "LOGGER", ".", "info", "(", "\"Removing temporary directory.\"", ")", "rmtree", "(", "temp_dir", ")", "LOGGER", ".", "info", "(", "\"Success! The mock repo has been updated.\"", ")", "os", ".", "chdir", "(", "previous_wd", ")" ]
Clone and gzip the memote-mock-repo used for CLI and integration tests. The repo is hosted at 'https://github.com/ChristianLieven/memote-mock-repo.git' and maintained separately from
[ "Clone", "and", "gzip", "the", "memote", "-", "mock", "-", "repo", "used", "for", "CLI", "and", "integration", "tests", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/scripts/update_mock_repo.py#L39-L86
opencobra/memote
memote/support/biomass.py
sum_biomass_weight
def sum_biomass_weight(reaction): """ Compute the sum of all reaction compounds. This function expects all metabolites of the biomass reaction to have formula information assigned. Parameters ---------- reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. Returns ------- float The molecular weight of the biomass reaction in units of g/mmol. """ return sum(-coef * met.formula_weight for (met, coef) in iteritems(reaction.metabolites)) / 1000.0
python
def sum_biomass_weight(reaction): """ Compute the sum of all reaction compounds. This function expects all metabolites of the biomass reaction to have formula information assigned. Parameters ---------- reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. Returns ------- float The molecular weight of the biomass reaction in units of g/mmol. """ return sum(-coef * met.formula_weight for (met, coef) in iteritems(reaction.metabolites)) / 1000.0
[ "def", "sum_biomass_weight", "(", "reaction", ")", ":", "return", "sum", "(", "-", "coef", "*", "met", ".", "formula_weight", "for", "(", "met", ",", "coef", ")", "in", "iteritems", "(", "reaction", ".", "metabolites", ")", ")", "/", "1000.0" ]
Compute the sum of all reaction compounds. This function expects all metabolites of the biomass reaction to have formula information assigned. Parameters ---------- reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. Returns ------- float The molecular weight of the biomass reaction in units of g/mmol.
[ "Compute", "the", "sum", "of", "all", "reaction", "compounds", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/biomass.py#L69-L88
opencobra/memote
memote/support/biomass.py
find_biomass_precursors
def find_biomass_precursors(model, reaction): """ Return a list of all biomass precursors excluding ATP and H2O. Parameters ---------- reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. model : cobra.Model The metabolic model under investigation. Returns ------- list Metabolite objects that are reactants of the biomass reaction excluding ATP and H2O. """ id_of_main_compartment = helpers.find_compartment_id_in_model(model, 'c') gam_reactants = set() try: gam_reactants.update([ helpers.find_met_in_model( model, "MNXM3", id_of_main_compartment)[0]]) except RuntimeError: pass try: gam_reactants.update([ helpers.find_met_in_model( model, "MNXM2", id_of_main_compartment)[0]]) except RuntimeError: pass biomass_precursors = set(reaction.reactants) - gam_reactants return list(biomass_precursors)
python
def find_biomass_precursors(model, reaction): """ Return a list of all biomass precursors excluding ATP and H2O. Parameters ---------- reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. model : cobra.Model The metabolic model under investigation. Returns ------- list Metabolite objects that are reactants of the biomass reaction excluding ATP and H2O. """ id_of_main_compartment = helpers.find_compartment_id_in_model(model, 'c') gam_reactants = set() try: gam_reactants.update([ helpers.find_met_in_model( model, "MNXM3", id_of_main_compartment)[0]]) except RuntimeError: pass try: gam_reactants.update([ helpers.find_met_in_model( model, "MNXM2", id_of_main_compartment)[0]]) except RuntimeError: pass biomass_precursors = set(reaction.reactants) - gam_reactants return list(biomass_precursors)
[ "def", "find_biomass_precursors", "(", "model", ",", "reaction", ")", ":", "id_of_main_compartment", "=", "helpers", ".", "find_compartment_id_in_model", "(", "model", ",", "'c'", ")", "gam_reactants", "=", "set", "(", ")", "try", ":", "gam_reactants", ".", "update", "(", "[", "helpers", ".", "find_met_in_model", "(", "model", ",", "\"MNXM3\"", ",", "id_of_main_compartment", ")", "[", "0", "]", "]", ")", "except", "RuntimeError", ":", "pass", "try", ":", "gam_reactants", ".", "update", "(", "[", "helpers", ".", "find_met_in_model", "(", "model", ",", "\"MNXM2\"", ",", "id_of_main_compartment", ")", "[", "0", "]", "]", ")", "except", "RuntimeError", ":", "pass", "biomass_precursors", "=", "set", "(", "reaction", ".", "reactants", ")", "-", "gam_reactants", "return", "list", "(", "biomass_precursors", ")" ]
Return a list of all biomass precursors excluding ATP and H2O. Parameters ---------- reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. model : cobra.Model The metabolic model under investigation. Returns ------- list Metabolite objects that are reactants of the biomass reaction excluding ATP and H2O.
[ "Return", "a", "list", "of", "all", "biomass", "precursors", "excluding", "ATP", "and", "H2O", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/biomass.py#L91-L126
opencobra/memote
memote/support/biomass.py
find_blocked_biomass_precursors
def find_blocked_biomass_precursors(reaction, model): """ Return a list of all biomass precursors that cannot be produced. Parameters ---------- reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. model : cobra.Model The metabolic model under investigation. Returns ------- list Metabolite objects that are reactants of the biomass reaction excluding ATP and H2O that cannot be produced by flux balance analysis. """ LOGGER.debug("Finding blocked biomass precursors") precursors = find_biomass_precursors(model, reaction) blocked_precursors = list() _, ub = helpers.find_bounds(model) for precursor in precursors: with model: dm_rxn = model.add_boundary( precursor, type="safe-demand", reaction_id="safe_demand", lb=0, ub=ub ) flux = helpers.run_fba(model, dm_rxn.id, direction='max') if np.isnan(flux) or abs(flux) < 1E-08: blocked_precursors.append(precursor) return blocked_precursors
python
def find_blocked_biomass_precursors(reaction, model): """ Return a list of all biomass precursors that cannot be produced. Parameters ---------- reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. model : cobra.Model The metabolic model under investigation. Returns ------- list Metabolite objects that are reactants of the biomass reaction excluding ATP and H2O that cannot be produced by flux balance analysis. """ LOGGER.debug("Finding blocked biomass precursors") precursors = find_biomass_precursors(model, reaction) blocked_precursors = list() _, ub = helpers.find_bounds(model) for precursor in precursors: with model: dm_rxn = model.add_boundary( precursor, type="safe-demand", reaction_id="safe_demand", lb=0, ub=ub ) flux = helpers.run_fba(model, dm_rxn.id, direction='max') if np.isnan(flux) or abs(flux) < 1E-08: blocked_precursors.append(precursor) return blocked_precursors
[ "def", "find_blocked_biomass_precursors", "(", "reaction", ",", "model", ")", ":", "LOGGER", ".", "debug", "(", "\"Finding blocked biomass precursors\"", ")", "precursors", "=", "find_biomass_precursors", "(", "model", ",", "reaction", ")", "blocked_precursors", "=", "list", "(", ")", "_", ",", "ub", "=", "helpers", ".", "find_bounds", "(", "model", ")", "for", "precursor", "in", "precursors", ":", "with", "model", ":", "dm_rxn", "=", "model", ".", "add_boundary", "(", "precursor", ",", "type", "=", "\"safe-demand\"", ",", "reaction_id", "=", "\"safe_demand\"", ",", "lb", "=", "0", ",", "ub", "=", "ub", ")", "flux", "=", "helpers", ".", "run_fba", "(", "model", ",", "dm_rxn", ".", "id", ",", "direction", "=", "'max'", ")", "if", "np", ".", "isnan", "(", "flux", ")", "or", "abs", "(", "flux", ")", "<", "1E-08", ":", "blocked_precursors", ".", "append", "(", "precursor", ")", "return", "blocked_precursors" ]
Return a list of all biomass precursors that cannot be produced. Parameters ---------- reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. model : cobra.Model The metabolic model under investigation. Returns ------- list Metabolite objects that are reactants of the biomass reaction excluding ATP and H2O that cannot be produced by flux balance analysis.
[ "Return", "a", "list", "of", "all", "biomass", "precursors", "that", "cannot", "be", "produced", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/biomass.py#L129-L163
opencobra/memote
memote/support/biomass.py
gam_in_biomass
def gam_in_biomass(model, reaction): """ Return boolean if biomass reaction includes growth-associated maintenance. Parameters ---------- model : cobra.Model The metabolic model under investigation. reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. Returns ------- boolean True if the biomass reaction includes ATP and H2O as reactants and ADP, Pi and H as products, False otherwise. """ id_of_main_compartment = helpers.find_compartment_id_in_model(model, 'c') try: left = { helpers.find_met_in_model( model, "MNXM3", id_of_main_compartment)[0], helpers.find_met_in_model( model, "MNXM2", id_of_main_compartment)[0] } right = { helpers.find_met_in_model( model, "MNXM7", id_of_main_compartment)[0], helpers.find_met_in_model( model, "MNXM1", id_of_main_compartment)[0], helpers.find_met_in_model( model, "MNXM9", id_of_main_compartment)[0] } except RuntimeError: return False return ( left.issubset(set(reaction.reactants)) and right.issubset(set(reaction.products)))
python
def gam_in_biomass(model, reaction): """ Return boolean if biomass reaction includes growth-associated maintenance. Parameters ---------- model : cobra.Model The metabolic model under investigation. reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. Returns ------- boolean True if the biomass reaction includes ATP and H2O as reactants and ADP, Pi and H as products, False otherwise. """ id_of_main_compartment = helpers.find_compartment_id_in_model(model, 'c') try: left = { helpers.find_met_in_model( model, "MNXM3", id_of_main_compartment)[0], helpers.find_met_in_model( model, "MNXM2", id_of_main_compartment)[0] } right = { helpers.find_met_in_model( model, "MNXM7", id_of_main_compartment)[0], helpers.find_met_in_model( model, "MNXM1", id_of_main_compartment)[0], helpers.find_met_in_model( model, "MNXM9", id_of_main_compartment)[0] } except RuntimeError: return False return ( left.issubset(set(reaction.reactants)) and right.issubset(set(reaction.products)))
[ "def", "gam_in_biomass", "(", "model", ",", "reaction", ")", ":", "id_of_main_compartment", "=", "helpers", ".", "find_compartment_id_in_model", "(", "model", ",", "'c'", ")", "try", ":", "left", "=", "{", "helpers", ".", "find_met_in_model", "(", "model", ",", "\"MNXM3\"", ",", "id_of_main_compartment", ")", "[", "0", "]", ",", "helpers", ".", "find_met_in_model", "(", "model", ",", "\"MNXM2\"", ",", "id_of_main_compartment", ")", "[", "0", "]", "}", "right", "=", "{", "helpers", ".", "find_met_in_model", "(", "model", ",", "\"MNXM7\"", ",", "id_of_main_compartment", ")", "[", "0", "]", ",", "helpers", ".", "find_met_in_model", "(", "model", ",", "\"MNXM1\"", ",", "id_of_main_compartment", ")", "[", "0", "]", ",", "helpers", ".", "find_met_in_model", "(", "model", ",", "\"MNXM9\"", ",", "id_of_main_compartment", ")", "[", "0", "]", "}", "except", "RuntimeError", ":", "return", "False", "return", "(", "left", ".", "issubset", "(", "set", "(", "reaction", ".", "reactants", ")", ")", "and", "right", ".", "issubset", "(", "set", "(", "reaction", ".", "products", ")", ")", ")" ]
Return boolean if biomass reaction includes growth-associated maintenance. Parameters ---------- model : cobra.Model The metabolic model under investigation. reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. Returns ------- boolean True if the biomass reaction includes ATP and H2O as reactants and ADP, Pi and H as products, False otherwise.
[ "Return", "boolean", "if", "biomass", "reaction", "includes", "growth", "-", "associated", "maintenance", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/biomass.py#L166-L206
opencobra/memote
memote/support/biomass.py
find_direct_metabolites
def find_direct_metabolites(model, reaction, tolerance=1E-06): """ Return list of possible direct biomass precursor metabolites. The term direct metabolites describes metabolites that are involved only in either transport and/or boundary reactions, AND the biomass reaction(s), but not in any purely metabolic reactions. Parameters ---------- model : cobra.Model The metabolic model under investigation. reaction : cobra.Reaction The biomass reaction of the model under investigation. tolerance : float, optional Tolerance below which values will be regarded as zero. Returns ------- list Metabolites that qualify as direct metabolites i.e. biomass precursors that are taken up to be consumed by the biomass reaction only. """ biomass_rxns = set(helpers.find_biomass_reaction(model)) tra_bou_bio_rxns = helpers.find_interchange_biomass_reactions( model, biomass_rxns) try: precursors = find_biomass_precursors(model, reaction) main_comp = helpers.find_compartment_id_in_model(model, 'c') ext_space = helpers.find_compartment_id_in_model(model, 'e') except KeyError: LOGGER.error("Failed to properly identify cytosolic and extracellular " "compartments.") raise_with_traceback(KeyError("The cytosolic and/or extracellular " "compartments could not be identified.")) except RuntimeError: LOGGER.error("Failed to properly identify cytosolic and extracellular " "compartments.") raise_with_traceback(RuntimeError("The cytosolic and/or extracellular " "compartments could not be " "identified.")) else: tra_bou_bio_mets = [met for met in precursors if met.reactions.issubset(tra_bou_bio_rxns)] rxns_of_interest = set([rxn for met in tra_bou_bio_mets for rxn in met.reactions if rxn not in biomass_rxns]) solution = model.optimize(raise_error=True) if np.isclose(solution.objective_value, 0, atol=tolerance): LOGGER.error("Failed to generate a non-zero objective value with " "flux balance analysis.") raise OptimizationError( "The flux balance analysis on this model returned an " "objective value of 
zero. Make sure the model can " "grow! Check if the constraints are not too strict!") tra_bou_bio_fluxes = {r: solution[r.id] for r in rxns_of_interest} met_flux_sum = {m: 0 for m in tra_bou_bio_mets} return detect_false_positive_direct_metabolites( tra_bou_bio_mets, biomass_rxns, main_comp, ext_space, tra_bou_bio_fluxes, met_flux_sum)
python
def find_direct_metabolites(model, reaction, tolerance=1E-06): """ Return list of possible direct biomass precursor metabolites. The term direct metabolites describes metabolites that are involved only in either transport and/or boundary reactions, AND the biomass reaction(s), but not in any purely metabolic reactions. Parameters ---------- model : cobra.Model The metabolic model under investigation. reaction : cobra.Reaction The biomass reaction of the model under investigation. tolerance : float, optional Tolerance below which values will be regarded as zero. Returns ------- list Metabolites that qualify as direct metabolites i.e. biomass precursors that are taken up to be consumed by the biomass reaction only. """ biomass_rxns = set(helpers.find_biomass_reaction(model)) tra_bou_bio_rxns = helpers.find_interchange_biomass_reactions( model, biomass_rxns) try: precursors = find_biomass_precursors(model, reaction) main_comp = helpers.find_compartment_id_in_model(model, 'c') ext_space = helpers.find_compartment_id_in_model(model, 'e') except KeyError: LOGGER.error("Failed to properly identify cytosolic and extracellular " "compartments.") raise_with_traceback(KeyError("The cytosolic and/or extracellular " "compartments could not be identified.")) except RuntimeError: LOGGER.error("Failed to properly identify cytosolic and extracellular " "compartments.") raise_with_traceback(RuntimeError("The cytosolic and/or extracellular " "compartments could not be " "identified.")) else: tra_bou_bio_mets = [met for met in precursors if met.reactions.issubset(tra_bou_bio_rxns)] rxns_of_interest = set([rxn for met in tra_bou_bio_mets for rxn in met.reactions if rxn not in biomass_rxns]) solution = model.optimize(raise_error=True) if np.isclose(solution.objective_value, 0, atol=tolerance): LOGGER.error("Failed to generate a non-zero objective value with " "flux balance analysis.") raise OptimizationError( "The flux balance analysis on this model returned an " "objective value of 
zero. Make sure the model can " "grow! Check if the constraints are not too strict!") tra_bou_bio_fluxes = {r: solution[r.id] for r in rxns_of_interest} met_flux_sum = {m: 0 for m in tra_bou_bio_mets} return detect_false_positive_direct_metabolites( tra_bou_bio_mets, biomass_rxns, main_comp, ext_space, tra_bou_bio_fluxes, met_flux_sum)
[ "def", "find_direct_metabolites", "(", "model", ",", "reaction", ",", "tolerance", "=", "1E-06", ")", ":", "biomass_rxns", "=", "set", "(", "helpers", ".", "find_biomass_reaction", "(", "model", ")", ")", "tra_bou_bio_rxns", "=", "helpers", ".", "find_interchange_biomass_reactions", "(", "model", ",", "biomass_rxns", ")", "try", ":", "precursors", "=", "find_biomass_precursors", "(", "model", ",", "reaction", ")", "main_comp", "=", "helpers", ".", "find_compartment_id_in_model", "(", "model", ",", "'c'", ")", "ext_space", "=", "helpers", ".", "find_compartment_id_in_model", "(", "model", ",", "'e'", ")", "except", "KeyError", ":", "LOGGER", ".", "error", "(", "\"Failed to properly identify cytosolic and extracellular \"", "\"compartments.\"", ")", "raise_with_traceback", "(", "KeyError", "(", "\"The cytosolic and/or extracellular \"", "\"compartments could not be identified.\"", ")", ")", "except", "RuntimeError", ":", "LOGGER", ".", "error", "(", "\"Failed to properly identify cytosolic and extracellular \"", "\"compartments.\"", ")", "raise_with_traceback", "(", "RuntimeError", "(", "\"The cytosolic and/or extracellular \"", "\"compartments could not be \"", "\"identified.\"", ")", ")", "else", ":", "tra_bou_bio_mets", "=", "[", "met", "for", "met", "in", "precursors", "if", "met", ".", "reactions", ".", "issubset", "(", "tra_bou_bio_rxns", ")", "]", "rxns_of_interest", "=", "set", "(", "[", "rxn", "for", "met", "in", "tra_bou_bio_mets", "for", "rxn", "in", "met", ".", "reactions", "if", "rxn", "not", "in", "biomass_rxns", "]", ")", "solution", "=", "model", ".", "optimize", "(", "raise_error", "=", "True", ")", "if", "np", ".", "isclose", "(", "solution", ".", "objective_value", ",", "0", ",", "atol", "=", "tolerance", ")", ":", "LOGGER", ".", "error", "(", "\"Failed to generate a non-zero objective value with \"", "\"flux balance analysis.\"", ")", "raise", "OptimizationError", "(", "\"The flux balance analysis on this model returned an \"", "\"objective 
value of zero. Make sure the model can \"", "\"grow! Check if the constraints are not too strict!\"", ")", "tra_bou_bio_fluxes", "=", "{", "r", ":", "solution", "[", "r", ".", "id", "]", "for", "r", "in", "rxns_of_interest", "}", "met_flux_sum", "=", "{", "m", ":", "0", "for", "m", "in", "tra_bou_bio_mets", "}", "return", "detect_false_positive_direct_metabolites", "(", "tra_bou_bio_mets", ",", "biomass_rxns", ",", "main_comp", ",", "ext_space", ",", "tra_bou_bio_fluxes", ",", "met_flux_sum", ")" ]
Return list of possible direct biomass precursor metabolites. The term direct metabolites describes metabolites that are involved only in either transport and/or boundary reactions, AND the biomass reaction(s), but not in any purely metabolic reactions. Parameters ---------- model : cobra.Model The metabolic model under investigation. reaction : cobra.Reaction The biomass reaction of the model under investigation. tolerance : float, optional Tolerance below which values will be regarded as zero. Returns ------- list Metabolites that qualify as direct metabolites i.e. biomass precursors that are taken up to be consumed by the biomass reaction only.
[ "Return", "list", "of", "possible", "direct", "biomass", "precursor", "metabolites", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/biomass.py#L209-L272
opencobra/memote
memote/support/biomass.py
detect_false_positive_direct_metabolites
def detect_false_positive_direct_metabolites( candidates, biomass_reactions, cytosol, extra, reaction_fluxes, metabolite_fluxes): """ Weed out false positive direct metabolites. False positives exists in the extracellular compartment with flux from the cytosolic compartment and are part of the biomass reaction(s). It sums fluxes positively or negatively depending on if direct metabolites in the extracellular compartment are defined as reactants or products in various reactions. Parameters ---------- candidates : list of cobra.Metabolite Candidate direct metabolites. biomass_reactions : set of cobra.Reaction The biomass reactions. Usually one or two. cytosol : str The identifier of the cytosolic compartment. extra : str The identifier of the extracellular compartment. Returns ------- list Definitive list of direct metabolites, i.e., biomass precursors that are taken up to be consumed by the biomass reaction only. """ for met in candidates: is_internal = met.compartment != extra for rxn in met.reactions: if rxn in biomass_reactions: continue # Internal metabolites can not be false positives. 
if is_internal: metabolite_fluxes[met] += abs(reaction_fluxes[rxn]) continue # if the metabolite is in the "e" compartment and a reactant, # sum the fluxes accordingly (outward=negative, inward=positive) if met in rxn.reactants: product_comps = set([p.compartment for p in rxn.products]) # if the reaction has no product (outward flux) if len(product_comps) == 0: metabolite_fluxes[met] += -reaction_fluxes[rxn] # if the reaction has a product in "c" (inward flux) elif cytosol in product_comps: metabolite_fluxes[met] += reaction_fluxes[rxn] # if the metabolite is in the "e" compartment and a product, # sum the fluxes accordingly (outward=negative, inward=positive) elif met in rxn.products: reactant_comps = set([p.compartment for p in rxn.reactants]) # if the reaction has no reactant (inward flux) if len(reactant_comps) == 0: metabolite_fluxes[met] += reaction_fluxes[rxn] # if the reaction has a reactant in "c" (outward flux) elif cytosol in reactant_comps: metabolite_fluxes[met] += -reaction_fluxes[rxn] return [m for m, f in iteritems(metabolite_fluxes) if f > 0]
python
def detect_false_positive_direct_metabolites( candidates, biomass_reactions, cytosol, extra, reaction_fluxes, metabolite_fluxes): """ Weed out false positive direct metabolites. False positives exists in the extracellular compartment with flux from the cytosolic compartment and are part of the biomass reaction(s). It sums fluxes positively or negatively depending on if direct metabolites in the extracellular compartment are defined as reactants or products in various reactions. Parameters ---------- candidates : list of cobra.Metabolite Candidate direct metabolites. biomass_reactions : set of cobra.Reaction The biomass reactions. Usually one or two. cytosol : str The identifier of the cytosolic compartment. extra : str The identifier of the extracellular compartment. Returns ------- list Definitive list of direct metabolites, i.e., biomass precursors that are taken up to be consumed by the biomass reaction only. """ for met in candidates: is_internal = met.compartment != extra for rxn in met.reactions: if rxn in biomass_reactions: continue # Internal metabolites can not be false positives. 
if is_internal: metabolite_fluxes[met] += abs(reaction_fluxes[rxn]) continue # if the metabolite is in the "e" compartment and a reactant, # sum the fluxes accordingly (outward=negative, inward=positive) if met in rxn.reactants: product_comps = set([p.compartment for p in rxn.products]) # if the reaction has no product (outward flux) if len(product_comps) == 0: metabolite_fluxes[met] += -reaction_fluxes[rxn] # if the reaction has a product in "c" (inward flux) elif cytosol in product_comps: metabolite_fluxes[met] += reaction_fluxes[rxn] # if the metabolite is in the "e" compartment and a product, # sum the fluxes accordingly (outward=negative, inward=positive) elif met in rxn.products: reactant_comps = set([p.compartment for p in rxn.reactants]) # if the reaction has no reactant (inward flux) if len(reactant_comps) == 0: metabolite_fluxes[met] += reaction_fluxes[rxn] # if the reaction has a reactant in "c" (outward flux) elif cytosol in reactant_comps: metabolite_fluxes[met] += -reaction_fluxes[rxn] return [m for m, f in iteritems(metabolite_fluxes) if f > 0]
[ "def", "detect_false_positive_direct_metabolites", "(", "candidates", ",", "biomass_reactions", ",", "cytosol", ",", "extra", ",", "reaction_fluxes", ",", "metabolite_fluxes", ")", ":", "for", "met", "in", "candidates", ":", "is_internal", "=", "met", ".", "compartment", "!=", "extra", "for", "rxn", "in", "met", ".", "reactions", ":", "if", "rxn", "in", "biomass_reactions", ":", "continue", "# Internal metabolites can not be false positives.", "if", "is_internal", ":", "metabolite_fluxes", "[", "met", "]", "+=", "abs", "(", "reaction_fluxes", "[", "rxn", "]", ")", "continue", "# if the metabolite is in the \"e\" compartment and a reactant,", "# sum the fluxes accordingly (outward=negative, inward=positive)", "if", "met", "in", "rxn", ".", "reactants", ":", "product_comps", "=", "set", "(", "[", "p", ".", "compartment", "for", "p", "in", "rxn", ".", "products", "]", ")", "# if the reaction has no product (outward flux)", "if", "len", "(", "product_comps", ")", "==", "0", ":", "metabolite_fluxes", "[", "met", "]", "+=", "-", "reaction_fluxes", "[", "rxn", "]", "# if the reaction has a product in \"c\" (inward flux)", "elif", "cytosol", "in", "product_comps", ":", "metabolite_fluxes", "[", "met", "]", "+=", "reaction_fluxes", "[", "rxn", "]", "# if the metabolite is in the \"e\" compartment and a product,", "# sum the fluxes accordingly (outward=negative, inward=positive)", "elif", "met", "in", "rxn", ".", "products", ":", "reactant_comps", "=", "set", "(", "[", "p", ".", "compartment", "for", "p", "in", "rxn", ".", "reactants", "]", ")", "# if the reaction has no reactant (inward flux)", "if", "len", "(", "reactant_comps", ")", "==", "0", ":", "metabolite_fluxes", "[", "met", "]", "+=", "reaction_fluxes", "[", "rxn", "]", "# if the reaction has a reactant in \"c\" (outward flux)", "elif", "cytosol", "in", "reactant_comps", ":", "metabolite_fluxes", "[", "met", "]", "+=", "-", "reaction_fluxes", "[", "rxn", "]", "return", "[", "m", "for", "m", ",", "f", "in", 
"iteritems", "(", "metabolite_fluxes", ")", "if", "f", ">", "0", "]" ]
Weed out false positive direct metabolites. False positives exists in the extracellular compartment with flux from the cytosolic compartment and are part of the biomass reaction(s). It sums fluxes positively or negatively depending on if direct metabolites in the extracellular compartment are defined as reactants or products in various reactions. Parameters ---------- candidates : list of cobra.Metabolite Candidate direct metabolites. biomass_reactions : set of cobra.Reaction The biomass reactions. Usually one or two. cytosol : str The identifier of the cytosolic compartment. extra : str The identifier of the extracellular compartment. Returns ------- list Definitive list of direct metabolites, i.e., biomass precursors that are taken up to be consumed by the biomass reaction only.
[ "Weed", "out", "false", "positive", "direct", "metabolites", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/biomass.py#L275-L334
opencobra/memote
memote/support/biomass.py
bundle_biomass_components
def bundle_biomass_components(model, reaction): """ Return bundle biomass component reactions if it is not one lumped reaction. There are two basic ways of specifying the biomass composition. The most common is a single lumped reaction containing all biomass precursors. Alternatively, the biomass equation can be split into several reactions each focusing on a different macromolecular component for instance a (1 gDW ash) + b (1 gDW phospholipids) + c (free fatty acids)+ d (1 gDW carbs) + e (1 gDW protein) + f (1 gDW RNA) + g (1 gDW DNA) + h (vitamins/cofactors) + xATP + xH2O-> 1 gDCW biomass + xADP + xH + xPi. This function aims to identify if the given biomass reaction 'reaction', is a lumped all-in-one reaction, or whether it is just the final composing reaction of all macromolecular components. It is important to identify which other reaction belong to a given biomass reaction to be able to identify universal biomass components or calculate detailed precursor stoichiometries. Parameters ---------- model : cobra.Model The metabolic model under investigation. reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. Returns ------- list One or more reactions that qualify as THE biomass equation together. Notes ----- Counting H2O, ADP, Pi, H, and ATP, the amount of metabolites in a split reaction is comparatively low: Any reaction with less or equal to 15 metabolites can probably be counted as a split reaction containing Ash, Phospholipids, Fatty Acids, Carbohydrates (i.e. cell wall components), Protein, RNA, DNA, Cofactors and Vitamins, and Small Molecules. Any reaction with more than or equal to 28 metabolites, however, (21 AA + 3 Nucleotides (4-ATP) + 4 Deoxy-Nucleotides) can be considered a lumped reaction. Anything in between will be treated conservatively as a lumped reaction. 
For split reactions, after removing any of the metabolites associated with growth-associated energy expenditure (H2O, ADP, Pi, H, and ATP), the only remaining metabolites should be generalized macromolecule precursors e.g. Protein, Phospholipids etc. Each of these have their own composing reactions. Hence we include the reactions of these metabolites in the set that ultimately makes up the returned list of reactions that together make up the biomass equation. """ if len(reaction.metabolites) >= 16: return [reaction] id_of_main_compartment = helpers.find_compartment_id_in_model(model, 'c') gam_mets = ["MNXM3", "MNXM2", "MNXM7", "MNXM1", 'MNXM9'] try: gam = set([helpers.find_met_in_model( model, met, id_of_main_compartment)[0] for met in gam_mets]) except RuntimeError: gam = set() regex = re.compile('^{}(_[a-zA-Z]+?)*?$'.format('biomass'), re.IGNORECASE) biomass_metabolite = set(model.metabolites.query(regex)) macromolecules = set(reaction.metabolites) - gam - biomass_metabolite bundled_reactions = set() for met in macromolecules: bundled_reactions = bundled_reactions | set(met.reactions) return list(bundled_reactions)
python
def bundle_biomass_components(model, reaction): """ Return bundle biomass component reactions if it is not one lumped reaction. There are two basic ways of specifying the biomass composition. The most common is a single lumped reaction containing all biomass precursors. Alternatively, the biomass equation can be split into several reactions each focusing on a different macromolecular component for instance a (1 gDW ash) + b (1 gDW phospholipids) + c (free fatty acids)+ d (1 gDW carbs) + e (1 gDW protein) + f (1 gDW RNA) + g (1 gDW DNA) + h (vitamins/cofactors) + xATP + xH2O-> 1 gDCW biomass + xADP + xH + xPi. This function aims to identify if the given biomass reaction 'reaction', is a lumped all-in-one reaction, or whether it is just the final composing reaction of all macromolecular components. It is important to identify which other reaction belong to a given biomass reaction to be able to identify universal biomass components or calculate detailed precursor stoichiometries. Parameters ---------- model : cobra.Model The metabolic model under investigation. reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. Returns ------- list One or more reactions that qualify as THE biomass equation together. Notes ----- Counting H2O, ADP, Pi, H, and ATP, the amount of metabolites in a split reaction is comparatively low: Any reaction with less or equal to 15 metabolites can probably be counted as a split reaction containing Ash, Phospholipids, Fatty Acids, Carbohydrates (i.e. cell wall components), Protein, RNA, DNA, Cofactors and Vitamins, and Small Molecules. Any reaction with more than or equal to 28 metabolites, however, (21 AA + 3 Nucleotides (4-ATP) + 4 Deoxy-Nucleotides) can be considered a lumped reaction. Anything in between will be treated conservatively as a lumped reaction. 
For split reactions, after removing any of the metabolites associated with growth-associated energy expenditure (H2O, ADP, Pi, H, and ATP), the only remaining metabolites should be generalized macromolecule precursors e.g. Protein, Phospholipids etc. Each of these have their own composing reactions. Hence we include the reactions of these metabolites in the set that ultimately makes up the returned list of reactions that together make up the biomass equation. """ if len(reaction.metabolites) >= 16: return [reaction] id_of_main_compartment = helpers.find_compartment_id_in_model(model, 'c') gam_mets = ["MNXM3", "MNXM2", "MNXM7", "MNXM1", 'MNXM9'] try: gam = set([helpers.find_met_in_model( model, met, id_of_main_compartment)[0] for met in gam_mets]) except RuntimeError: gam = set() regex = re.compile('^{}(_[a-zA-Z]+?)*?$'.format('biomass'), re.IGNORECASE) biomass_metabolite = set(model.metabolites.query(regex)) macromolecules = set(reaction.metabolites) - gam - biomass_metabolite bundled_reactions = set() for met in macromolecules: bundled_reactions = bundled_reactions | set(met.reactions) return list(bundled_reactions)
[ "def", "bundle_biomass_components", "(", "model", ",", "reaction", ")", ":", "if", "len", "(", "reaction", ".", "metabolites", ")", ">=", "16", ":", "return", "[", "reaction", "]", "id_of_main_compartment", "=", "helpers", ".", "find_compartment_id_in_model", "(", "model", ",", "'c'", ")", "gam_mets", "=", "[", "\"MNXM3\"", ",", "\"MNXM2\"", ",", "\"MNXM7\"", ",", "\"MNXM1\"", ",", "'MNXM9'", "]", "try", ":", "gam", "=", "set", "(", "[", "helpers", ".", "find_met_in_model", "(", "model", ",", "met", ",", "id_of_main_compartment", ")", "[", "0", "]", "for", "met", "in", "gam_mets", "]", ")", "except", "RuntimeError", ":", "gam", "=", "set", "(", ")", "regex", "=", "re", ".", "compile", "(", "'^{}(_[a-zA-Z]+?)*?$'", ".", "format", "(", "'biomass'", ")", ",", "re", ".", "IGNORECASE", ")", "biomass_metabolite", "=", "set", "(", "model", ".", "metabolites", ".", "query", "(", "regex", ")", ")", "macromolecules", "=", "set", "(", "reaction", ".", "metabolites", ")", "-", "gam", "-", "biomass_metabolite", "bundled_reactions", "=", "set", "(", ")", "for", "met", "in", "macromolecules", ":", "bundled_reactions", "=", "bundled_reactions", "|", "set", "(", "met", ".", "reactions", ")", "return", "list", "(", "bundled_reactions", ")" ]
Return bundle biomass component reactions if it is not one lumped reaction. There are two basic ways of specifying the biomass composition. The most common is a single lumped reaction containing all biomass precursors. Alternatively, the biomass equation can be split into several reactions each focusing on a different macromolecular component for instance a (1 gDW ash) + b (1 gDW phospholipids) + c (free fatty acids)+ d (1 gDW carbs) + e (1 gDW protein) + f (1 gDW RNA) + g (1 gDW DNA) + h (vitamins/cofactors) + xATP + xH2O-> 1 gDCW biomass + xADP + xH + xPi. This function aims to identify if the given biomass reaction 'reaction', is a lumped all-in-one reaction, or whether it is just the final composing reaction of all macromolecular components. It is important to identify which other reaction belong to a given biomass reaction to be able to identify universal biomass components or calculate detailed precursor stoichiometries. Parameters ---------- model : cobra.Model The metabolic model under investigation. reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. Returns ------- list One or more reactions that qualify as THE biomass equation together. Notes ----- Counting H2O, ADP, Pi, H, and ATP, the amount of metabolites in a split reaction is comparatively low: Any reaction with less or equal to 15 metabolites can probably be counted as a split reaction containing Ash, Phospholipids, Fatty Acids, Carbohydrates (i.e. cell wall components), Protein, RNA, DNA, Cofactors and Vitamins, and Small Molecules. Any reaction with more than or equal to 28 metabolites, however, (21 AA + 3 Nucleotides (4-ATP) + 4 Deoxy-Nucleotides) can be considered a lumped reaction. Anything in between will be treated conservatively as a lumped reaction. 
For split reactions, after removing any of the metabolites associated with growth-associated energy expenditure (H2O, ADP, Pi, H, and ATP), the only remaining metabolites should be generalized macromolecule precursors e.g. Protein, Phospholipids etc. Each of these have their own composing reactions. Hence we include the reactions of these metabolites in the set that ultimately makes up the returned list of reactions that together make up the biomass equation.
[ "Return", "bundle", "biomass", "component", "reactions", "if", "it", "is", "not", "one", "lumped", "reaction", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/biomass.py#L337-L408
opencobra/memote
memote/support/biomass.py
essential_precursors_not_in_biomass
def essential_precursors_not_in_biomass(model, reaction): u""" Return a list of essential precursors missing from the biomass reaction. There are universal components of life that make up the biomass of all known organisms. These include all proteinogenic amino acids, deoxy- and ribonucleotides, water and a range of metabolic cofactors. Parameters ---------- model : cobra.Model The metabolic model under investigation. reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. Returns ------- list IDs of essential metabolites missing from the biomass reaction. The IDS will appear in the models namespace if the metabolite exists, but will be using the MetaNetX namespace if the metabolite does not exist in the model. Notes ----- "Answering the question of what to include in the core of a biomass objective function is not always straightforward. One example is different nucleotide forms, which, although inter-convertible, are essential for cellular chemistry. We propose here that all essential and irreplaceable molecules for metabolism should be included in the biomass functions of genome scale metabolic models. In the special case of cofactors, when two forms of the same cofactor take part in the same reactions (such as NAD and NADH), only one form could be included for the sake of simplicity. When a class of cofactors includes active and non-active interconvertible forms, the active forms should be preferred. [1]_." Please note, that [1]_ also suggest to count C1 carriers (derivatives of tetrahydrofolate(B9) or tetrahydromethanopterin) as universal cofactors. We have omitted these from this check because there are many individual compounds that classify as C1 carriers, and it is not clear a priori which one should be preferred. In a future update, we may consider identifying these using a chemical ontology. References ---------- .. [1] Xavier, J. C., Patil, K. R., & Rocha, I. (2017). 
Integration of Biomass Formulations of Genome-Scale Metabolic Models with Experimental Data Reveals Universally Essential Cofactors in Prokaryotes. Metabolic Engineering, 39(October 2016), 200–208. http://doi.org/10.1016/j.ymben.2016.12.002 """ main_comp = helpers.find_compartment_id_in_model(model, 'c') biomass_eq = bundle_biomass_components(model, reaction) pooled_precursors = set( [met for rxn in biomass_eq for met in rxn.metabolites]) missing_essential_precursors = [] for mnx_id in ESSENTIAL_PRECURSOR_IDS: try: met = helpers.find_met_in_model(model, mnx_id, main_comp)[0] if met not in pooled_precursors: missing_essential_precursors.append(met.id) except RuntimeError: missing_essential_precursors.append(mnx_id) return missing_essential_precursors
python
def essential_precursors_not_in_biomass(model, reaction): u""" Return a list of essential precursors missing from the biomass reaction. There are universal components of life that make up the biomass of all known organisms. These include all proteinogenic amino acids, deoxy- and ribonucleotides, water and a range of metabolic cofactors. Parameters ---------- model : cobra.Model The metabolic model under investigation. reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. Returns ------- list IDs of essential metabolites missing from the biomass reaction. The IDS will appear in the models namespace if the metabolite exists, but will be using the MetaNetX namespace if the metabolite does not exist in the model. Notes ----- "Answering the question of what to include in the core of a biomass objective function is not always straightforward. One example is different nucleotide forms, which, although inter-convertible, are essential for cellular chemistry. We propose here that all essential and irreplaceable molecules for metabolism should be included in the biomass functions of genome scale metabolic models. In the special case of cofactors, when two forms of the same cofactor take part in the same reactions (such as NAD and NADH), only one form could be included for the sake of simplicity. When a class of cofactors includes active and non-active interconvertible forms, the active forms should be preferred. [1]_." Please note, that [1]_ also suggest to count C1 carriers (derivatives of tetrahydrofolate(B9) or tetrahydromethanopterin) as universal cofactors. We have omitted these from this check because there are many individual compounds that classify as C1 carriers, and it is not clear a priori which one should be preferred. In a future update, we may consider identifying these using a chemical ontology. References ---------- .. [1] Xavier, J. C., Patil, K. R., & Rocha, I. (2017). 
Integration of Biomass Formulations of Genome-Scale Metabolic Models with Experimental Data Reveals Universally Essential Cofactors in Prokaryotes. Metabolic Engineering, 39(October 2016), 200–208. http://doi.org/10.1016/j.ymben.2016.12.002 """ main_comp = helpers.find_compartment_id_in_model(model, 'c') biomass_eq = bundle_biomass_components(model, reaction) pooled_precursors = set( [met for rxn in biomass_eq for met in rxn.metabolites]) missing_essential_precursors = [] for mnx_id in ESSENTIAL_PRECURSOR_IDS: try: met = helpers.find_met_in_model(model, mnx_id, main_comp)[0] if met not in pooled_precursors: missing_essential_precursors.append(met.id) except RuntimeError: missing_essential_precursors.append(mnx_id) return missing_essential_precursors
[ "def", "essential_precursors_not_in_biomass", "(", "model", ",", "reaction", ")", ":", "main_comp", "=", "helpers", ".", "find_compartment_id_in_model", "(", "model", ",", "'c'", ")", "biomass_eq", "=", "bundle_biomass_components", "(", "model", ",", "reaction", ")", "pooled_precursors", "=", "set", "(", "[", "met", "for", "rxn", "in", "biomass_eq", "for", "met", "in", "rxn", ".", "metabolites", "]", ")", "missing_essential_precursors", "=", "[", "]", "for", "mnx_id", "in", "ESSENTIAL_PRECURSOR_IDS", ":", "try", ":", "met", "=", "helpers", ".", "find_met_in_model", "(", "model", ",", "mnx_id", ",", "main_comp", ")", "[", "0", "]", "if", "met", "not", "in", "pooled_precursors", ":", "missing_essential_precursors", ".", "append", "(", "met", ".", "id", ")", "except", "RuntimeError", ":", "missing_essential_precursors", ".", "append", "(", "mnx_id", ")", "return", "missing_essential_precursors" ]
u""" Return a list of essential precursors missing from the biomass reaction. There are universal components of life that make up the biomass of all known organisms. These include all proteinogenic amino acids, deoxy- and ribonucleotides, water and a range of metabolic cofactors. Parameters ---------- model : cobra.Model The metabolic model under investigation. reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. Returns ------- list IDs of essential metabolites missing from the biomass reaction. The IDS will appear in the models namespace if the metabolite exists, but will be using the MetaNetX namespace if the metabolite does not exist in the model. Notes ----- "Answering the question of what to include in the core of a biomass objective function is not always straightforward. One example is different nucleotide forms, which, although inter-convertible, are essential for cellular chemistry. We propose here that all essential and irreplaceable molecules for metabolism should be included in the biomass functions of genome scale metabolic models. In the special case of cofactors, when two forms of the same cofactor take part in the same reactions (such as NAD and NADH), only one form could be included for the sake of simplicity. When a class of cofactors includes active and non-active interconvertible forms, the active forms should be preferred. [1]_." Please note, that [1]_ also suggest to count C1 carriers (derivatives of tetrahydrofolate(B9) or tetrahydromethanopterin) as universal cofactors. We have omitted these from this check because there are many individual compounds that classify as C1 carriers, and it is not clear a priori which one should be preferred. In a future update, we may consider identifying these using a chemical ontology. References ---------- .. [1] Xavier, J. C., Patil, K. R., & Rocha, I. (2017). 
Integration of Biomass Formulations of Genome-Scale Metabolic Models with Experimental Data Reveals Universally Essential Cofactors in Prokaryotes. Metabolic Engineering, 39(October 2016), 200–208. http://doi.org/10.1016/j.ymben.2016.12.002
[ "u", "Return", "a", "list", "of", "essential", "precursors", "missing", "from", "the", "biomass", "reaction", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/biomass.py#L411-L477
opencobra/memote
memote/suite/cli/callbacks.py
validate_experimental
def validate_experimental(context, param, value): """Load and validate an experimental data configuration.""" if value is None: return config = ExperimentConfiguration(value) config.validate() return config
python
def validate_experimental(context, param, value): """Load and validate an experimental data configuration.""" if value is None: return config = ExperimentConfiguration(value) config.validate() return config
[ "def", "validate_experimental", "(", "context", ",", "param", ",", "value", ")", ":", "if", "value", "is", "None", ":", "return", "config", "=", "ExperimentConfiguration", "(", "value", ")", "config", ".", "validate", "(", ")", "return", "config" ]
Load and validate an experimental data configuration.
[ "Load", "and", "validate", "an", "experimental", "data", "configuration", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/cli/callbacks.py#L44-L50
opencobra/memote
memote/suite/cli/callbacks.py
probe_git
def probe_git(): """Return a git repository instance if it exists.""" try: repo = git.Repo() except git.InvalidGitRepositoryError: LOGGER.warning( "We highly recommend keeping your model in a git repository." " It allows you to track changes and to easily collaborate with" " others via online platforms such as https://github.com.\n") return if repo.is_dirty(): LOGGER.critical( "Please git commit or git stash all changes before running" " the memote suite.") sys.exit(1) return repo
python
def probe_git(): """Return a git repository instance if it exists.""" try: repo = git.Repo() except git.InvalidGitRepositoryError: LOGGER.warning( "We highly recommend keeping your model in a git repository." " It allows you to track changes and to easily collaborate with" " others via online platforms such as https://github.com.\n") return if repo.is_dirty(): LOGGER.critical( "Please git commit or git stash all changes before running" " the memote suite.") sys.exit(1) return repo
[ "def", "probe_git", "(", ")", ":", "try", ":", "repo", "=", "git", ".", "Repo", "(", ")", "except", "git", ".", "InvalidGitRepositoryError", ":", "LOGGER", ".", "warning", "(", "\"We highly recommend keeping your model in a git repository.\"", "\" It allows you to track changes and to easily collaborate with\"", "\" others via online platforms such as https://github.com.\\n\"", ")", "return", "if", "repo", ".", "is_dirty", "(", ")", ":", "LOGGER", ".", "critical", "(", "\"Please git commit or git stash all changes before running\"", "\" the memote suite.\"", ")", "sys", ".", "exit", "(", "1", ")", "return", "repo" ]
Return a git repository instance if it exists.
[ "Return", "a", "git", "repository", "instance", "if", "it", "exists", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/cli/callbacks.py#L79-L94
opencobra/memote
memote/suite/cli/callbacks.py
git_installed
def git_installed(): """Interrupt execution of memote if `git` has not been installed.""" LOGGER.info("Checking `git` installation.") try: check_output(['git', '--version']) except CalledProcessError as e: LOGGER.critical( "The execution of memote was interrupted since no installation of " "`git` could be detected. Please install git to use " "this functionality: " "https://git-scm.com/book/en/v2/Getting-Started-Installing-Git") LOGGER.debug("Underlying error:", exc_info=e) sys.exit(1)
python
def git_installed(): """Interrupt execution of memote if `git` has not been installed.""" LOGGER.info("Checking `git` installation.") try: check_output(['git', '--version']) except CalledProcessError as e: LOGGER.critical( "The execution of memote was interrupted since no installation of " "`git` could be detected. Please install git to use " "this functionality: " "https://git-scm.com/book/en/v2/Getting-Started-Installing-Git") LOGGER.debug("Underlying error:", exc_info=e) sys.exit(1)
[ "def", "git_installed", "(", ")", ":", "LOGGER", ".", "info", "(", "\"Checking `git` installation.\"", ")", "try", ":", "check_output", "(", "[", "'git'", ",", "'--version'", "]", ")", "except", "CalledProcessError", "as", "e", ":", "LOGGER", ".", "critical", "(", "\"The execution of memote was interrupted since no installation of \"", "\"`git` could be detected. Please install git to use \"", "\"this functionality: \"", "\"https://git-scm.com/book/en/v2/Getting-Started-Installing-Git\"", ")", "LOGGER", ".", "debug", "(", "\"Underlying error:\"", ",", "exc_info", "=", "e", ")", "sys", ".", "exit", "(", "1", ")" ]
Interrupt execution of memote if `git` has not been installed.
[ "Interrupt", "execution", "of", "memote", "if", "git", "has", "not", "been", "installed", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/cli/callbacks.py#L103-L115
opencobra/memote
memote/suite/results/repo_result_manager.py
RepoResultManager.record_git_info
def record_git_info(self, commit=None): """ Record git meta information. Parameters ---------- commit : str, optional Unique hexsha of the desired commit. Returns ------- GitInfo Git commit meta information. """ if commit is None: commit = self._repo.head.commit else: commit = self._repo.commit(commit) return GitInfo( hexsha=commit.hexsha, author=commit.author.name, email=commit.author.email, authored_on=commit.authored_datetime )
python
def record_git_info(self, commit=None): """ Record git meta information. Parameters ---------- commit : str, optional Unique hexsha of the desired commit. Returns ------- GitInfo Git commit meta information. """ if commit is None: commit = self._repo.head.commit else: commit = self._repo.commit(commit) return GitInfo( hexsha=commit.hexsha, author=commit.author.name, email=commit.author.email, authored_on=commit.authored_datetime )
[ "def", "record_git_info", "(", "self", ",", "commit", "=", "None", ")", ":", "if", "commit", "is", "None", ":", "commit", "=", "self", ".", "_repo", ".", "head", ".", "commit", "else", ":", "commit", "=", "self", ".", "_repo", ".", "commit", "(", "commit", ")", "return", "GitInfo", "(", "hexsha", "=", "commit", ".", "hexsha", ",", "author", "=", "commit", ".", "author", ".", "name", ",", "email", "=", "commit", ".", "author", ".", "email", ",", "authored_on", "=", "commit", ".", "authored_datetime", ")" ]
Record git meta information. Parameters ---------- commit : str, optional Unique hexsha of the desired commit. Returns ------- GitInfo Git commit meta information.
[ "Record", "git", "meta", "information", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/results/repo_result_manager.py#L61-L85
opencobra/memote
memote/suite/results/repo_result_manager.py
RepoResultManager.add_git
def add_git(meta, git_info): """Enrich the result meta information with commit data.""" meta["hexsha"] = git_info.hexsha meta["author"] = git_info.author meta["email"] = git_info.email meta["authored_on"] = git_info.authored_on.isoformat(" ")
python
def add_git(meta, git_info): """Enrich the result meta information with commit data.""" meta["hexsha"] = git_info.hexsha meta["author"] = git_info.author meta["email"] = git_info.email meta["authored_on"] = git_info.authored_on.isoformat(" ")
[ "def", "add_git", "(", "meta", ",", "git_info", ")", ":", "meta", "[", "\"hexsha\"", "]", "=", "git_info", ".", "hexsha", "meta", "[", "\"author\"", "]", "=", "git_info", ".", "author", "meta", "[", "\"email\"", "]", "=", "git_info", ".", "email", "meta", "[", "\"authored_on\"", "]", "=", "git_info", ".", "authored_on", ".", "isoformat", "(", "\" \"", ")" ]
Enrich the result meta information with commit data.
[ "Enrich", "the", "result", "meta", "information", "with", "commit", "data", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/results/repo_result_manager.py#L106-L111
opencobra/memote
memote/suite/results/repo_result_manager.py
RepoResultManager.store
def store(self, result, commit=None, **kwargs): """ Store a result in a JSON file attaching git meta information. Parameters ---------- result : memote.MemoteResult The dictionary structure of results. commit : str, optional Unique hexsha of the desired commit. kwargs : Passed to parent function. """ git_info = self.record_git_info(commit) self.add_git(result.meta, git_info) filename = self.get_filename(git_info) super(RepoResultManager, self).store( result, filename=filename, **kwargs)
python
def store(self, result, commit=None, **kwargs): """ Store a result in a JSON file attaching git meta information. Parameters ---------- result : memote.MemoteResult The dictionary structure of results. commit : str, optional Unique hexsha of the desired commit. kwargs : Passed to parent function. """ git_info = self.record_git_info(commit) self.add_git(result.meta, git_info) filename = self.get_filename(git_info) super(RepoResultManager, self).store( result, filename=filename, **kwargs)
[ "def", "store", "(", "self", ",", "result", ",", "commit", "=", "None", ",", "*", "*", "kwargs", ")", ":", "git_info", "=", "self", ".", "record_git_info", "(", "commit", ")", "self", ".", "add_git", "(", "result", ".", "meta", ",", "git_info", ")", "filename", "=", "self", ".", "get_filename", "(", "git_info", ")", "super", "(", "RepoResultManager", ",", "self", ")", ".", "store", "(", "result", ",", "filename", "=", "filename", ",", "*", "*", "kwargs", ")" ]
Store a result in a JSON file attaching git meta information. Parameters ---------- result : memote.MemoteResult The dictionary structure of results. commit : str, optional Unique hexsha of the desired commit. kwargs : Passed to parent function.
[ "Store", "a", "result", "in", "a", "JSON", "file", "attaching", "git", "meta", "information", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/results/repo_result_manager.py#L113-L131
opencobra/memote
memote/suite/results/repo_result_manager.py
RepoResultManager.load
def load(self, commit=None): """Load a result from the storage directory.""" git_info = self.record_git_info(commit) LOGGER.debug("Loading the result for commit '%s'.", git_info.hexsha) filename = self.get_filename(git_info) LOGGER.debug("Loading the result '%s'.", filename) result = super(RepoResultManager, self).load(filename) self.add_git(result.meta, git_info) return result
python
def load(self, commit=None): """Load a result from the storage directory.""" git_info = self.record_git_info(commit) LOGGER.debug("Loading the result for commit '%s'.", git_info.hexsha) filename = self.get_filename(git_info) LOGGER.debug("Loading the result '%s'.", filename) result = super(RepoResultManager, self).load(filename) self.add_git(result.meta, git_info) return result
[ "def", "load", "(", "self", ",", "commit", "=", "None", ")", ":", "git_info", "=", "self", ".", "record_git_info", "(", "commit", ")", "LOGGER", ".", "debug", "(", "\"Loading the result for commit '%s'.\"", ",", "git_info", ".", "hexsha", ")", "filename", "=", "self", ".", "get_filename", "(", "git_info", ")", "LOGGER", ".", "debug", "(", "\"Loading the result '%s'.\"", ",", "filename", ")", "result", "=", "super", "(", "RepoResultManager", ",", "self", ")", ".", "load", "(", "filename", ")", "self", ".", "add_git", "(", "result", ".", "meta", ",", "git_info", ")", "return", "result" ]
Load a result from the storage directory.
[ "Load", "a", "result", "from", "the", "storage", "directory", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/results/repo_result_manager.py#L133-L141
opencobra/memote
memote/jinja2_extension.py
MemoteExtension.normalize
def normalize(filename): """Return an absolute path of the given file name.""" # Default value means we do not resolve a model file. if filename == "default": return filename filename = expanduser(filename) if isabs(filename): return filename else: return join(os.getcwd(), filename)
python
def normalize(filename): """Return an absolute path of the given file name.""" # Default value means we do not resolve a model file. if filename == "default": return filename filename = expanduser(filename) if isabs(filename): return filename else: return join(os.getcwd(), filename)
[ "def", "normalize", "(", "filename", ")", ":", "# Default value means we do not resolve a model file.", "if", "filename", "==", "\"default\"", ":", "return", "filename", "filename", "=", "expanduser", "(", "filename", ")", "if", "isabs", "(", "filename", ")", ":", "return", "filename", "else", ":", "return", "join", "(", "os", ".", "getcwd", "(", ")", ",", "filename", ")" ]
Return an absolute path of the given file name.
[ "Return", "an", "absolute", "path", "of", "the", "given", "file", "name", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/jinja2_extension.py#L42-L51
opencobra/memote
memote/experimental/growth.py
GrowthExperiment.load
def load(self, dtype_conversion=None): """ Load the data table and corresponding validation schema. Parameters ---------- dtype_conversion : dict Column names as keys and corresponding type for loading the data. Please take a look at the `pandas documentation <https://pandas.pydata.org/pandas-docs/stable/io.html#specifying-column-data-types>`__ for detailed explanations. """ if dtype_conversion is None: dtype_conversion = {"growth": str} super(GrowthExperiment, self).load(dtype_conversion=dtype_conversion) self.data["growth"] = self.data["growth"].isin(self.TRUTHY)
python
def load(self, dtype_conversion=None): """ Load the data table and corresponding validation schema. Parameters ---------- dtype_conversion : dict Column names as keys and corresponding type for loading the data. Please take a look at the `pandas documentation <https://pandas.pydata.org/pandas-docs/stable/io.html#specifying-column-data-types>`__ for detailed explanations. """ if dtype_conversion is None: dtype_conversion = {"growth": str} super(GrowthExperiment, self).load(dtype_conversion=dtype_conversion) self.data["growth"] = self.data["growth"].isin(self.TRUTHY)
[ "def", "load", "(", "self", ",", "dtype_conversion", "=", "None", ")", ":", "if", "dtype_conversion", "is", "None", ":", "dtype_conversion", "=", "{", "\"growth\"", ":", "str", "}", "super", "(", "GrowthExperiment", ",", "self", ")", ".", "load", "(", "dtype_conversion", "=", "dtype_conversion", ")", "self", ".", "data", "[", "\"growth\"", "]", "=", "self", ".", "data", "[", "\"growth\"", "]", ".", "isin", "(", "self", ".", "TRUTHY", ")" ]
Load the data table and corresponding validation schema. Parameters ---------- dtype_conversion : dict Column names as keys and corresponding type for loading the data. Please take a look at the `pandas documentation <https://pandas.pydata.org/pandas-docs/stable/io.html#specifying-column-data-types>`__ for detailed explanations.
[ "Load", "the", "data", "table", "and", "corresponding", "validation", "schema", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/experimental/growth.py#L49-L65
opencobra/memote
memote/experimental/growth.py
GrowthExperiment.evaluate
def evaluate(self, model, threshold=0.1): """Evaluate in silico growth rates.""" with model: if self.medium is not None: self.medium.apply(model) if self.objective is not None: model.objective = self.objective model.add_cons_vars(self.constraints) threshold *= model.slim_optimize() growth = list() for row in self.data.itertuples(index=False): with model: exchange = model.reactions.get_by_id(row.exchange) if bool(exchange.reactants): exchange.lower_bound = -row.uptake else: exchange.upper_bound = row.uptake growth.append(model.slim_optimize() >= threshold) return DataFrame({ "exchange": self.data["exchange"], "growth": growth })
python
def evaluate(self, model, threshold=0.1): """Evaluate in silico growth rates.""" with model: if self.medium is not None: self.medium.apply(model) if self.objective is not None: model.objective = self.objective model.add_cons_vars(self.constraints) threshold *= model.slim_optimize() growth = list() for row in self.data.itertuples(index=False): with model: exchange = model.reactions.get_by_id(row.exchange) if bool(exchange.reactants): exchange.lower_bound = -row.uptake else: exchange.upper_bound = row.uptake growth.append(model.slim_optimize() >= threshold) return DataFrame({ "exchange": self.data["exchange"], "growth": growth })
[ "def", "evaluate", "(", "self", ",", "model", ",", "threshold", "=", "0.1", ")", ":", "with", "model", ":", "if", "self", ".", "medium", "is", "not", "None", ":", "self", ".", "medium", ".", "apply", "(", "model", ")", "if", "self", ".", "objective", "is", "not", "None", ":", "model", ".", "objective", "=", "self", ".", "objective", "model", ".", "add_cons_vars", "(", "self", ".", "constraints", ")", "threshold", "*=", "model", ".", "slim_optimize", "(", ")", "growth", "=", "list", "(", ")", "for", "row", "in", "self", ".", "data", ".", "itertuples", "(", "index", "=", "False", ")", ":", "with", "model", ":", "exchange", "=", "model", ".", "reactions", ".", "get_by_id", "(", "row", ".", "exchange", ")", "if", "bool", "(", "exchange", ".", "reactants", ")", ":", "exchange", ".", "lower_bound", "=", "-", "row", ".", "uptake", "else", ":", "exchange", ".", "upper_bound", "=", "row", ".", "uptake", "growth", ".", "append", "(", "model", ".", "slim_optimize", "(", ")", ">=", "threshold", ")", "return", "DataFrame", "(", "{", "\"exchange\"", ":", "self", ".", "data", "[", "\"exchange\"", "]", ",", "\"growth\"", ":", "growth", "}", ")" ]
Evaluate in silico growth rates.
[ "Evaluate", "in", "silico", "growth", "rates", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/experimental/growth.py#L67-L88
opencobra/memote
memote/suite/results/models.py
BJSON.process_bind_param
def process_bind_param(self, value, dialect): """Convert the value to a JSON encoded string before storing it.""" try: with BytesIO() as stream: with GzipFile(fileobj=stream, mode="wb") as file_handle: file_handle.write( jsonify(value, pretty=False).encode("utf-8") ) output = stream.getvalue() return output except TypeError as error: log_json_incompatible_types(value) raise_with_traceback(error)
python
def process_bind_param(self, value, dialect): """Convert the value to a JSON encoded string before storing it.""" try: with BytesIO() as stream: with GzipFile(fileobj=stream, mode="wb") as file_handle: file_handle.write( jsonify(value, pretty=False).encode("utf-8") ) output = stream.getvalue() return output except TypeError as error: log_json_incompatible_types(value) raise_with_traceback(error)
[ "def", "process_bind_param", "(", "self", ",", "value", ",", "dialect", ")", ":", "try", ":", "with", "BytesIO", "(", ")", "as", "stream", ":", "with", "GzipFile", "(", "fileobj", "=", "stream", ",", "mode", "=", "\"wb\"", ")", "as", "file_handle", ":", "file_handle", ".", "write", "(", "jsonify", "(", "value", ",", "pretty", "=", "False", ")", ".", "encode", "(", "\"utf-8\"", ")", ")", "output", "=", "stream", ".", "getvalue", "(", ")", "return", "output", "except", "TypeError", "as", "error", ":", "log_json_incompatible_types", "(", "value", ")", "raise_with_traceback", "(", "error", ")" ]
Convert the value to a JSON encoded string before storing it.
[ "Convert", "the", "value", "to", "a", "JSON", "encoded", "string", "before", "storing", "it", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/results/models.py#L70-L82
opencobra/memote
memote/suite/results/models.py
BJSON.process_result_value
def process_result_value(self, value, dialect): """Convert a JSON encoded string to a dictionary structure.""" if value is not None: with BytesIO(value) as stream: with GzipFile(fileobj=stream, mode="rb") as file_handle: value = json.loads(file_handle.read().decode("utf-8")) return value
python
def process_result_value(self, value, dialect): """Convert a JSON encoded string to a dictionary structure.""" if value is not None: with BytesIO(value) as stream: with GzipFile(fileobj=stream, mode="rb") as file_handle: value = json.loads(file_handle.read().decode("utf-8")) return value
[ "def", "process_result_value", "(", "self", ",", "value", ",", "dialect", ")", ":", "if", "value", "is", "not", "None", ":", "with", "BytesIO", "(", "value", ")", "as", "stream", ":", "with", "GzipFile", "(", "fileobj", "=", "stream", ",", "mode", "=", "\"rb\"", ")", "as", "file_handle", ":", "value", "=", "json", ".", "loads", "(", "file_handle", ".", "read", "(", ")", ".", "decode", "(", "\"utf-8\"", ")", ")", "return", "value" ]
Convert a JSON encoded string to a dictionary structure.
[ "Convert", "a", "JSON", "encoded", "string", "to", "a", "dictionary", "structure", "." ]
train
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/results/models.py#L84-L90
pawamoy/django-zxcvbn-password
src/zxcvbn_password/widgets.py
PasswordStrengthInput.render
def render(self, name, value, attrs=None, **kwargs): """Widget render method.""" min_score = zxcvbn_min_score() message_title = _('Warning') message_body = _( 'This password would take ' '<em class="password_strength_time"></em> to crack.') strength_markup = """ <div class="progress-bloc" style="margin-top: 10px;"> <div class="progress" style="margin-bottom: 10px;"> <div class="progress-bar progress-bar-warning password_strength_bar" role="progressbar" aria-valuenow="0" aria-valuemin="{min_score}" aria-valuemax="4" style="width: 0%%"> </div> </div> <p class="text-muted password_strength_info hidden"> <span class="label label-danger"> {title} </span> <span style="margin-left:5px;"> {body} </span> </p> </div> """ strength_markup = strength_markup.format( title=message_title, body=message_body, min_score=min_score) try: self.attrs['class'] = '%s password_strength'.strip() % self.attrs['class'] # noqa except KeyError: self.attrs['class'] = 'password_strength' return mark_safe(super(PasswordStrengthInput, self).render( # nosec name, value, attrs) + strength_markup)
python
def render(self, name, value, attrs=None, **kwargs): """Widget render method.""" min_score = zxcvbn_min_score() message_title = _('Warning') message_body = _( 'This password would take ' '<em class="password_strength_time"></em> to crack.') strength_markup = """ <div class="progress-bloc" style="margin-top: 10px;"> <div class="progress" style="margin-bottom: 10px;"> <div class="progress-bar progress-bar-warning password_strength_bar" role="progressbar" aria-valuenow="0" aria-valuemin="{min_score}" aria-valuemax="4" style="width: 0%%"> </div> </div> <p class="text-muted password_strength_info hidden"> <span class="label label-danger"> {title} </span> <span style="margin-left:5px;"> {body} </span> </p> </div> """ strength_markup = strength_markup.format( title=message_title, body=message_body, min_score=min_score) try: self.attrs['class'] = '%s password_strength'.strip() % self.attrs['class'] # noqa except KeyError: self.attrs['class'] = 'password_strength' return mark_safe(super(PasswordStrengthInput, self).render( # nosec name, value, attrs) + strength_markup)
[ "def", "render", "(", "self", ",", "name", ",", "value", ",", "attrs", "=", "None", ",", "*", "*", "kwargs", ")", ":", "min_score", "=", "zxcvbn_min_score", "(", ")", "message_title", "=", "_", "(", "'Warning'", ")", "message_body", "=", "_", "(", "'This password would take '", "'<em class=\"password_strength_time\"></em> to crack.'", ")", "strength_markup", "=", "\"\"\"\n <div class=\"progress-bloc\" style=\"margin-top: 10px;\">\n <div class=\"progress\" style=\"margin-bottom: 10px;\">\n <div class=\"progress-bar\n progress-bar-warning\n password_strength_bar\"\n role=\"progressbar\"\n aria-valuenow=\"0\"\n aria-valuemin=\"{min_score}\"\n aria-valuemax=\"4\"\n style=\"width: 0%%\">\n </div>\n </div>\n <p class=\"text-muted password_strength_info hidden\">\n <span class=\"label label-danger\">\n {title}\n </span>\n <span style=\"margin-left:5px;\">\n {body}\n </span>\n </p>\n </div>\n \"\"\"", "strength_markup", "=", "strength_markup", ".", "format", "(", "title", "=", "message_title", ",", "body", "=", "message_body", ",", "min_score", "=", "min_score", ")", "try", ":", "self", ".", "attrs", "[", "'class'", "]", "=", "'%s password_strength'", ".", "strip", "(", ")", "%", "self", ".", "attrs", "[", "'class'", "]", "# noqa", "except", "KeyError", ":", "self", ".", "attrs", "[", "'class'", "]", "=", "'password_strength'", "return", "mark_safe", "(", "super", "(", "PasswordStrengthInput", ",", "self", ")", ".", "render", "(", "# nosec", "name", ",", "value", ",", "attrs", ")", "+", "strength_markup", ")" ]
Widget render method.
[ "Widget", "render", "method", "." ]
train
https://github.com/pawamoy/django-zxcvbn-password/blob/7c6d37099da0f130d6ab88a0f941b6de476a0f86/src/zxcvbn_password/widgets.py#L17-L60
pawamoy/django-zxcvbn-password
src/zxcvbn_password/widgets.py
PasswordConfirmationInput.render
def render(self, name, value, attrs=None, **kwargs): """Widget render method.""" if self.confirm_with: self.attrs['data-confirm-with'] = 'id_%s' % self.confirm_with confirmation_markup = """ <div style="margin-top: 10px;" class="hidden password_strength_info"> <p class="text-muted"> <span class="label label-danger"> %s </span> <span style="margin-left:5px;">%s</span> </p> </div> """ % (_('Warning'), _("Your passwords don't match.")) try: self.attrs['class'] = '%s password_confirmation'.strip() % self.attrs['class'] # noqa except KeyError: self.attrs['class'] = 'password_confirmation' return mark_safe(super(PasswordConfirmationInput, self).render( # nosec name, value, attrs) + confirmation_markup)
python
def render(self, name, value, attrs=None, **kwargs): """Widget render method.""" if self.confirm_with: self.attrs['data-confirm-with'] = 'id_%s' % self.confirm_with confirmation_markup = """ <div style="margin-top: 10px;" class="hidden password_strength_info"> <p class="text-muted"> <span class="label label-danger"> %s </span> <span style="margin-left:5px;">%s</span> </p> </div> """ % (_('Warning'), _("Your passwords don't match.")) try: self.attrs['class'] = '%s password_confirmation'.strip() % self.attrs['class'] # noqa except KeyError: self.attrs['class'] = 'password_confirmation' return mark_safe(super(PasswordConfirmationInput, self).render( # nosec name, value, attrs) + confirmation_markup)
[ "def", "render", "(", "self", ",", "name", ",", "value", ",", "attrs", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "confirm_with", ":", "self", ".", "attrs", "[", "'data-confirm-with'", "]", "=", "'id_%s'", "%", "self", ".", "confirm_with", "confirmation_markup", "=", "\"\"\"\n <div style=\"margin-top: 10px;\" class=\"hidden password_strength_info\">\n <p class=\"text-muted\">\n <span class=\"label label-danger\">\n %s\n </span>\n <span style=\"margin-left:5px;\">%s</span>\n </p>\n </div>\n \"\"\"", "%", "(", "_", "(", "'Warning'", ")", ",", "_", "(", "\"Your passwords don't match.\"", ")", ")", "try", ":", "self", ".", "attrs", "[", "'class'", "]", "=", "'%s password_confirmation'", ".", "strip", "(", ")", "%", "self", ".", "attrs", "[", "'class'", "]", "# noqa", "except", "KeyError", ":", "self", ".", "attrs", "[", "'class'", "]", "=", "'password_confirmation'", "return", "mark_safe", "(", "super", "(", "PasswordConfirmationInput", ",", "self", ")", ".", "render", "(", "# nosec", "name", ",", "value", ",", "attrs", ")", "+", "confirmation_markup", ")" ]
Widget render method.
[ "Widget", "render", "method", "." ]
train
https://github.com/pawamoy/django-zxcvbn-password/blob/7c6d37099da0f130d6ab88a0f941b6de476a0f86/src/zxcvbn_password/widgets.py#L79-L101
pawamoy/django-zxcvbn-password
src/zxcvbn_password/validators.py
ZXCVBNValidator.validate
def validate(self, password, user=None): """Validate method, run zxcvbn and check score.""" user_inputs = [] if user is not None: for attribute in self.user_attributes: if hasattr(user, attribute): user_inputs.append(getattr(user, attribute)) results = zxcvbn(password, user_inputs=user_inputs) if results.get('score', 0) < self.min_score: feedback = ', '.join( results.get('feedback', {}).get('suggestions', [])) raise ValidationError(_(feedback), code=self.code, params={})
python
def validate(self, password, user=None): """Validate method, run zxcvbn and check score.""" user_inputs = [] if user is not None: for attribute in self.user_attributes: if hasattr(user, attribute): user_inputs.append(getattr(user, attribute)) results = zxcvbn(password, user_inputs=user_inputs) if results.get('score', 0) < self.min_score: feedback = ', '.join( results.get('feedback', {}).get('suggestions', [])) raise ValidationError(_(feedback), code=self.code, params={})
[ "def", "validate", "(", "self", ",", "password", ",", "user", "=", "None", ")", ":", "user_inputs", "=", "[", "]", "if", "user", "is", "not", "None", ":", "for", "attribute", "in", "self", ".", "user_attributes", ":", "if", "hasattr", "(", "user", ",", "attribute", ")", ":", "user_inputs", ".", "append", "(", "getattr", "(", "user", ",", "attribute", ")", ")", "results", "=", "zxcvbn", "(", "password", ",", "user_inputs", "=", "user_inputs", ")", "if", "results", ".", "get", "(", "'score'", ",", "0", ")", "<", "self", ".", "min_score", ":", "feedback", "=", "', '", ".", "join", "(", "results", ".", "get", "(", "'feedback'", ",", "{", "}", ")", ".", "get", "(", "'suggestions'", ",", "[", "]", ")", ")", "raise", "ValidationError", "(", "_", "(", "feedback", ")", ",", "code", "=", "self", ".", "code", ",", "params", "=", "{", "}", ")" ]
Validate method, run zxcvbn and check score.
[ "Validate", "method", "run", "zxcvbn", "and", "check", "score", "." ]
train
https://github.com/pawamoy/django-zxcvbn-password/blob/7c6d37099da0f130d6ab88a0f941b6de476a0f86/src/zxcvbn_password/validators.py#L42-L54
rossant/ipymd
ipymd/formats/atlas.py
_get_html_contents
def _get_html_contents(html): """Process a HTML block and detects whether it is a code block, a math block, or a regular HTML block.""" parser = MyHTMLParser() parser.feed(html) if parser.is_code: return ('code', parser.data.strip()) elif parser.is_math: return ('math', parser.data.strip()) else: return '', ''
python
def _get_html_contents(html): """Process a HTML block and detects whether it is a code block, a math block, or a regular HTML block.""" parser = MyHTMLParser() parser.feed(html) if parser.is_code: return ('code', parser.data.strip()) elif parser.is_math: return ('math', parser.data.strip()) else: return '', ''
[ "def", "_get_html_contents", "(", "html", ")", ":", "parser", "=", "MyHTMLParser", "(", ")", "parser", ".", "feed", "(", "html", ")", "if", "parser", ".", "is_code", ":", "return", "(", "'code'", ",", "parser", ".", "data", ".", "strip", "(", ")", ")", "elif", "parser", ".", "is_math", ":", "return", "(", "'math'", ",", "parser", ".", "data", ".", "strip", "(", ")", ")", "else", ":", "return", "''", ",", "''" ]
Process a HTML block and detects whether it is a code block, a math block, or a regular HTML block.
[ "Process", "a", "HTML", "block", "and", "detects", "whether", "it", "is", "a", "code", "block", "a", "math", "block", "or", "a", "regular", "HTML", "block", "." ]
train
https://github.com/rossant/ipymd/blob/d87c9ebc59d67fe78b0139ee00e0e5307682e303/ipymd/formats/atlas.py#L47-L57
rossant/ipymd
ipymd/core/format_manager.py
_is_path
def _is_path(s): """Return whether an object is a path.""" if isinstance(s, string_types): try: return op.exists(s) except (OSError, ValueError): return False else: return False
python
def _is_path(s): """Return whether an object is a path.""" if isinstance(s, string_types): try: return op.exists(s) except (OSError, ValueError): return False else: return False
[ "def", "_is_path", "(", "s", ")", ":", "if", "isinstance", "(", "s", ",", "string_types", ")", ":", "try", ":", "return", "op", ".", "exists", "(", "s", ")", "except", "(", "OSError", ",", "ValueError", ")", ":", "return", "False", "else", ":", "return", "False" ]
Return whether an object is a path.
[ "Return", "whether", "an", "object", "is", "a", "path", "." ]
train
https://github.com/rossant/ipymd/blob/d87c9ebc59d67fe78b0139ee00e0e5307682e303/ipymd/core/format_manager.py#L38-L46
rossant/ipymd
ipymd/core/format_manager.py
FormatManager.format_manager
def format_manager(cls): """Return the instance singleton, creating if necessary """ if cls._instance is None: # Discover the formats and register them with a new singleton. cls._instance = cls().register_entrypoints() return cls._instance
python
def format_manager(cls): """Return the instance singleton, creating if necessary """ if cls._instance is None: # Discover the formats and register them with a new singleton. cls._instance = cls().register_entrypoints() return cls._instance
[ "def", "format_manager", "(", "cls", ")", ":", "if", "cls", ".", "_instance", "is", "None", ":", "# Discover the formats and register them with a new singleton.", "cls", ".", "_instance", "=", "cls", "(", ")", ".", "register_entrypoints", "(", ")", "return", "cls", ".", "_instance" ]
Return the instance singleton, creating if necessary
[ "Return", "the", "instance", "singleton", "creating", "if", "necessary" ]
train
https://github.com/rossant/ipymd/blob/d87c9ebc59d67fe78b0139ee00e0e5307682e303/ipymd/core/format_manager.py#L79-L85
rossant/ipymd
ipymd/core/format_manager.py
FormatManager.register_entrypoints
def register_entrypoints(self): """Look through the `setup_tools` `entry_points` and load all of the formats. """ for spec in iter_entry_points(self.entry_point_group): format_properties = {"name": spec.name} try: format_properties.update(spec.load()) except (DistributionNotFound, ImportError) as err: self.log.info( "ipymd format {} could not be loaded: {}".format( spec.name, err)) continue self.register(**format_properties) return self
python
def register_entrypoints(self): """Look through the `setup_tools` `entry_points` and load all of the formats. """ for spec in iter_entry_points(self.entry_point_group): format_properties = {"name": spec.name} try: format_properties.update(spec.load()) except (DistributionNotFound, ImportError) as err: self.log.info( "ipymd format {} could not be loaded: {}".format( spec.name, err)) continue self.register(**format_properties) return self
[ "def", "register_entrypoints", "(", "self", ")", ":", "for", "spec", "in", "iter_entry_points", "(", "self", ".", "entry_point_group", ")", ":", "format_properties", "=", "{", "\"name\"", ":", "spec", ".", "name", "}", "try", ":", "format_properties", ".", "update", "(", "spec", ".", "load", "(", ")", ")", "except", "(", "DistributionNotFound", ",", "ImportError", ")", "as", "err", ":", "self", ".", "log", ".", "info", "(", "\"ipymd format {} could not be loaded: {}\"", ".", "format", "(", "spec", ".", "name", ",", "err", ")", ")", "continue", "self", ".", "register", "(", "*", "*", "format_properties", ")", "return", "self" ]
Look through the `setup_tools` `entry_points` and load all of the formats.
[ "Look", "through", "the", "setup_tools", "entry_points", "and", "load", "all", "of", "the", "formats", "." ]
train
https://github.com/rossant/ipymd/blob/d87c9ebc59d67fe78b0139ee00e0e5307682e303/ipymd/core/format_manager.py#L87-L103
rossant/ipymd
ipymd/core/format_manager.py
FormatManager.format_from_extension
def format_from_extension(self, extension): """Find a format from its extension.""" formats = [name for name, format in self._formats.items() if format.get('file_extension', None) == extension] if len(formats) == 0: return None elif len(formats) == 2: raise RuntimeError("Several extensions are registered with " "that extension; please specify the format " "explicitly.") else: return formats[0]
python
def format_from_extension(self, extension): """Find a format from its extension.""" formats = [name for name, format in self._formats.items() if format.get('file_extension', None) == extension] if len(formats) == 0: return None elif len(formats) == 2: raise RuntimeError("Several extensions are registered with " "that extension; please specify the format " "explicitly.") else: return formats[0]
[ "def", "format_from_extension", "(", "self", ",", "extension", ")", ":", "formats", "=", "[", "name", "for", "name", ",", "format", "in", "self", ".", "_formats", ".", "items", "(", ")", "if", "format", ".", "get", "(", "'file_extension'", ",", "None", ")", "==", "extension", "]", "if", "len", "(", "formats", ")", "==", "0", ":", "return", "None", "elif", "len", "(", "formats", ")", "==", "2", ":", "raise", "RuntimeError", "(", "\"Several extensions are registered with \"", "\"that extension; please specify the format \"", "\"explicitly.\"", ")", "else", ":", "return", "formats", "[", "0", "]" ]
Find a format from its extension.
[ "Find", "a", "format", "from", "its", "extension", "." ]
train
https://github.com/rossant/ipymd/blob/d87c9ebc59d67fe78b0139ee00e0e5307682e303/ipymd/core/format_manager.py#L148-L160
rossant/ipymd
ipymd/core/format_manager.py
FormatManager.load
def load(self, file, name=None): """Load a file. The format name can be specified explicitly or inferred from the file extension.""" if name is None: name = self.format_from_extension(op.splitext(file)[1]) file_format = self.file_type(name) if file_format == 'text': return _read_text(file) elif file_format == 'json': return _read_json(file) else: load_function = self._formats[name].get('load', None) if load_function is None: raise IOError("The format must declare a file type or " "load/save functions.") return load_function(file)
python
def load(self, file, name=None): """Load a file. The format name can be specified explicitly or inferred from the file extension.""" if name is None: name = self.format_from_extension(op.splitext(file)[1]) file_format = self.file_type(name) if file_format == 'text': return _read_text(file) elif file_format == 'json': return _read_json(file) else: load_function = self._formats[name].get('load', None) if load_function is None: raise IOError("The format must declare a file type or " "load/save functions.") return load_function(file)
[ "def", "load", "(", "self", ",", "file", ",", "name", "=", "None", ")", ":", "if", "name", "is", "None", ":", "name", "=", "self", ".", "format_from_extension", "(", "op", ".", "splitext", "(", "file", ")", "[", "1", "]", ")", "file_format", "=", "self", ".", "file_type", "(", "name", ")", "if", "file_format", "==", "'text'", ":", "return", "_read_text", "(", "file", ")", "elif", "file_format", "==", "'json'", ":", "return", "_read_json", "(", "file", ")", "else", ":", "load_function", "=", "self", ".", "_formats", "[", "name", "]", ".", "get", "(", "'load'", ",", "None", ")", "if", "load_function", "is", "None", ":", "raise", "IOError", "(", "\"The format must declare a file type or \"", "\"load/save functions.\"", ")", "return", "load_function", "(", "file", ")" ]
Load a file. The format name can be specified explicitly or inferred from the file extension.
[ "Load", "a", "file", ".", "The", "format", "name", "can", "be", "specified", "explicitly", "or", "inferred", "from", "the", "file", "extension", "." ]
train
https://github.com/rossant/ipymd/blob/d87c9ebc59d67fe78b0139ee00e0e5307682e303/ipymd/core/format_manager.py#L166-L181
rossant/ipymd
ipymd/core/format_manager.py
FormatManager.save
def save(self, file, contents, name=None, overwrite=False): """Save contents into a file. The format name can be specified explicitly or inferred from the file extension.""" if name is None: name = self.format_from_extension(op.splitext(file)[1]) file_format = self.file_type(name) if file_format == 'text': _write_text(file, contents) elif file_format == 'json': _write_json(file, contents) else: write_function = self._formats[name].get('save', None) if write_function is None: raise IOError("The format must declare a file type or " "load/save functions.") if op.exists(file) and not overwrite: print("The file already exists, please use overwrite=True.") return write_function(file, contents)
python
def save(self, file, contents, name=None, overwrite=False): """Save contents into a file. The format name can be specified explicitly or inferred from the file extension.""" if name is None: name = self.format_from_extension(op.splitext(file)[1]) file_format = self.file_type(name) if file_format == 'text': _write_text(file, contents) elif file_format == 'json': _write_json(file, contents) else: write_function = self._formats[name].get('save', None) if write_function is None: raise IOError("The format must declare a file type or " "load/save functions.") if op.exists(file) and not overwrite: print("The file already exists, please use overwrite=True.") return write_function(file, contents)
[ "def", "save", "(", "self", ",", "file", ",", "contents", ",", "name", "=", "None", ",", "overwrite", "=", "False", ")", ":", "if", "name", "is", "None", ":", "name", "=", "self", ".", "format_from_extension", "(", "op", ".", "splitext", "(", "file", ")", "[", "1", "]", ")", "file_format", "=", "self", ".", "file_type", "(", "name", ")", "if", "file_format", "==", "'text'", ":", "_write_text", "(", "file", ",", "contents", ")", "elif", "file_format", "==", "'json'", ":", "_write_json", "(", "file", ",", "contents", ")", "else", ":", "write_function", "=", "self", ".", "_formats", "[", "name", "]", ".", "get", "(", "'save'", ",", "None", ")", "if", "write_function", "is", "None", ":", "raise", "IOError", "(", "\"The format must declare a file type or \"", "\"load/save functions.\"", ")", "if", "op", ".", "exists", "(", "file", ")", "and", "not", "overwrite", ":", "print", "(", "\"The file already exists, please use overwrite=True.\"", ")", "return", "write_function", "(", "file", ",", "contents", ")" ]
Save contents into a file. The format name can be specified explicitly or inferred from the file extension.
[ "Save", "contents", "into", "a", "file", ".", "The", "format", "name", "can", "be", "specified", "explicitly", "or", "inferred", "from", "the", "file", "extension", "." ]
train
https://github.com/rossant/ipymd/blob/d87c9ebc59d67fe78b0139ee00e0e5307682e303/ipymd/core/format_manager.py#L183-L201
rossant/ipymd
ipymd/core/format_manager.py
FormatManager.create_reader
def create_reader(self, name, *args, **kwargs): """Create a new reader instance for a given format.""" self._check_format(name) return self._formats[name]['reader'](*args, **kwargs)
python
def create_reader(self, name, *args, **kwargs): """Create a new reader instance for a given format.""" self._check_format(name) return self._formats[name]['reader'](*args, **kwargs)
[ "def", "create_reader", "(", "self", ",", "name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_check_format", "(", "name", ")", "return", "self", ".", "_formats", "[", "name", "]", "[", "'reader'", "]", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Create a new reader instance for a given format.
[ "Create", "a", "new", "reader", "instance", "for", "a", "given", "format", "." ]
train
https://github.com/rossant/ipymd/blob/d87c9ebc59d67fe78b0139ee00e0e5307682e303/ipymd/core/format_manager.py#L203-L206