code : string
signature : string
docstring : string
loss_without_docstring : float64
loss_with_docstring : float64
factor : float64
met2mol = map_metabolites_to_structures(model.metabolites,
                                        model.compartments)
# Build a list associating reactions with their stoichiometry in molecular
# structure space.
structural = []
for rxn in model.reactions:
    # Ignore reactions that have metabolites without structures.
    if not all(met in met2mol for met in rxn.metabolites):
        continue
    # We consider substrates and products separately since, for example,
    # the InChI for H2O and OH is the same.
    substrates = {
        met2mol[met]: rxn.get_coefficient(met) for met in rxn.reactants
    }
    products = {
        met2mol[met]: rxn.get_coefficient(met) for met in rxn.products
    }
    structural.append((rxn, substrates, products))
# Compare reactions using their structure-based stoichiometries.
num_duplicated = set()
duplicates = []
for (rxn_a, sub_a, prod_a), (rxn_b, sub_b, prod_b) in combinations(
        structural, 2):
    # Compare the substrates.
    if sub_a != sub_b:
        continue
    # Compare the products.
    if prod_a != prod_b:
        continue
    # Compare whether they are both (ir-)reversible.
    if rxn_a.reversibility != rxn_b.reversibility:
        continue
    # TODO (Moritz Beber): We could compare bounds here but it might be
    # worth knowing about the reactions even if their bounds differ?
    duplicates.append((rxn_a.id, rxn_b.id))
    num_duplicated.add(rxn_a.id)
    num_duplicated.add(rxn_b.id)
return duplicates, len(num_duplicated)
def find_duplicate_reactions(model)
Return a list with pairs of reactions that are functionally identical. Identify duplicate reactions globally by checking if any two reactions have the same metabolites, same directionality and are in the same compartment. This can be useful to curate merged models or to clean up bulk model modifications. The heuristic compares reactions in a pairwise manner. For each reaction, the metabolite annotations are checked for a description of the structure (via InChI and InChIKey). If they exist, substrates and products as well as the stoichiometries of any reaction pair are compared. Only reactions where the substrates, products, stoichiometry and reversibility are identical are considered to be duplicates. This test will not be able to identify duplicate reactions if there are no structure annotations. Further, it will report reactions with differing bounds as equal if they otherwise match the above conditions. Parameters ---------- model : cobra.Model The metabolic model under investigation. Returns ------- list A list of pairs of duplicate reactions based on metabolites. int The number of unique reactions that have duplicates.
3.353668
3.281698
1.021931
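A minimal usage sketch for `find_duplicate_reactions`. It assumes a recent cobrapy where `cobra.io.load_model("textbook")` returns the bundled E. coli core model; any model loaded from SBML works the same way.

.. code-block:: python

    import cobra

    # Load the small E. coli core model that ships with cobrapy.
    model = cobra.io.load_model("textbook")
    pairs, n_unique = find_duplicate_reactions(model)
    print("duplicate pairs:", pairs)
    print("reactions involved:", n_unique)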
duplicates = dict()
for rxn_a, rxn_b in combinations(model.reactions, 2):
    if not (rxn_a.genes and rxn_b.genes):
        continue
    if rxn_a.genes == rxn_b.genes:
        # This works because the `genes` are frozen sets.
        identifiers = rxn_a.genes
        duplicates.setdefault(identifiers, set()).update(
            [rxn_a.id, rxn_b.id])
# Transform the object for JSON compatibility.
num_duplicated = set()
duplicated = {}
for key in duplicates:
    # Object keys must be strings in JSON.
    new_key = ",".join(sorted(g.id for g in key))
    duplicated[new_key] = rxns = list(duplicates[key])
    num_duplicated.update(rxns)
return duplicated, len(num_duplicated)
def find_reactions_with_identical_genes(model)
Return reactions that have identical genes. Identify duplicate reactions globally by checking if any two reactions have the same genes. This can be useful to curate merged models or to clean up bulk model modifications, but also to identify promiscuous enzymes. The heuristic compares reactions in a pairwise manner and reports reaction pairs whose genes are identical. Reactions with missing genes are skipped. Parameters ---------- model : cobra.Model The metabolic model under investigation. Returns ------- dict A mapping from sets of genes to all the reactions containing those genes. int The total number of unique reactions that appear to be duplicates based on their gene-protein-reaction associations.
3.624788
3.700794
0.979462
return [met.id for rxn in model.medium for met in model.reactions.get_by_id(rxn).metabolites]
def find_medium_metabolites(model)
Return the list of metabolites ingested/excreted by the model.
4.360921
3.792916
1.149754
ex_comp = find_external_compartment(model) return [met for met in model.metabolites if met.compartment == ex_comp]
def find_external_metabolites(model)
Return all metabolites in the external compartment.
3.450708
2.411144
1.43115
LOGGER.info("Storing result in '%s'.", filename) if filename.endswith(".gz"): with gzip.open(filename, "wb") as file_handle: file_handle.write( jsonify(result, pretty=pretty).encode("utf-8") ) else: with open(filename, "w", encoding="utf-8") as file_handle: file_handle.write(jsonify(result, pretty=pretty))
def store(self, result, filename, pretty=True)
Write a result to the given file. Parameters ---------- result : memote.MemoteResult The dictionary structure of results. filename : str or pathlib.Path Store results directly to the given filename. pretty : bool, optional Whether (default) or not to write JSON in a more legible format.
2.15518
2.143769
1.005323
LOGGER.info("Loading result from '%s'.", filename) if filename.endswith(".gz"): with gzip.open(filename, "rb") as file_handle: result = MemoteResult( json.loads(file_handle.read().decode("utf-8")) ) else: with open(filename, "r", encoding="utf-8") as file_handle: result = MemoteResult(json.load(file_handle)) # TODO (Moritz Beber): Validate the read-in JSON maybe? Trade-off # between extra time taken and correctness. Maybe we re-visit this # issue when there was a new JSON format version needed. return result
def load(self, filename)
Load a result from the given JSON file.
6.28723
5.784827
1.086848
doc = libsbml.readSBML(path) fbc = doc.getPlugin("fbc") sbml_ver = doc.getLevel(), doc.getVersion(), fbc if fbc is None else \ fbc.getVersion() with catch_warnings(record=True) as warnings: simplefilter("always") try: model = read_sbml_model(path) except Exception as err: notifications['errors'].append(str(err)) model = None validate = True else: validate = False notifications['warnings'].extend([str(w.message) for w in warnings]) if validate: run_sbml_validation(doc, notifications) return model, sbml_ver
def load_cobra_model(path, notifications)
Load a COBRA model with meta information from an SBML document.
3.687373
3.519193
1.047789
return "Line {}, Column {} - #{}: {} - Category: {}, Severity: {}".format( failure.getLine(), failure.getColumn(), failure.getErrorId(), failure.getMessage(), failure.getCategoryAsString(), failure.getSeverity() )
def format_failure(failure)
Format how an error or warning should be displayed.
5.732795
5.188221
1.104963
validator = libsbml.SBMLValidator() validator.validate(document) for i in range(document.getNumErrors()): notifications['errors'].append(format_failure(document.getError(i))) for i in range(validator.getNumFailures()): failure = validator.getFailure(i) if failure.isWarning(): notifications['warnings'].append(format_failure(failure)) else: notifications['errors'].append(format_failure(failure))
def run_sbml_validation(document, notifications)
Report errors and warnings found in an SBML document.
2.22251
2.235252
0.994299
git_info = self.record_git_info(commit) try: row = self.session.query(Result). \ filter_by(hexsha=git_info.hexsha). \ one() LOGGER.info("Updating result '%s'.", git_info.hexsha) row.memote_result = result except NoResultFound: row = Result(memote_result=result) LOGGER.info("Storing result '%s'.", git_info.hexsha) row.hexsha = git_info.hexsha row.author = git_info.author row.email = git_info.email row.authored_on = git_info.authored_on self.session.add(row) self.session.commit()
def store(self, result, commit=None, **kwargs)
Store a result in a JSON file attaching git meta information. Parameters ---------- result : memote.MemoteResult The dictionary structure of results. commit : str, optional Unique hexsha of the desired commit. kwargs : Passed to parent function.
2.880211
2.706098
1.064341
git_info = self.record_git_info(commit)
LOGGER.info("Loading result from '%s'.", git_info.hexsha)
result = MemoteResult(
    self.session.query(Result.memote_result).
    filter_by(hexsha=git_info.hexsha).
    one().memote_result)
# Add git info so the object is equivalent to the one returned by the
# RepoResultManager.
self.add_git(result.meta, git_info)
return result
def load(self, commit=None)
Load a result from the database.
7.899467
7.312234
1.080308
def format_data(data):
    # TODO Remove this failsafe once proper error handling is in place.
    if type == "percent" or data is None:
        # Return an empty list here to reduce the output file size.
        # The angular report will ignore the `data` and instead display
        # the `metric`.
        return []
    if type == "count":
        return len(data)
    return data

base = dict()
tests = base.setdefault("tests", dict())
score = base.setdefault("score", dict())
score_collection = score.setdefault("total_score", dict())
for branch, commits in self._history.iter_branches():
    for commit in reversed(commits):
        result = self.result = self._history.get_result(commit)
        # Calculate the score for each result and store all the total
        # scores for each commit in the base dictionary.
        self.compute_score()
        total_score = self.result["score"]["total_score"]
        score_collection.setdefault("history", list())
        score_collection["format_type"] = "score"
        score_collection["history"].append({
            "branch": branch,
            "commit": commit,
            "metric": total_score})
        # Now arrange the results for each test into the appropriate
        # format. Specifically such that the Accordion and the Vega
        # Plot components can easily read them.
        for test in result.cases:
            tests.setdefault(test, dict())
            if "title" not in tests[test]:
                tests[test]["title"] = result.cases[test]["title"]
            if "summary" not in tests[test]:
                tests[test]["summary"] = result.cases[test]["summary"]
            if "type" not in tests[test]:
                tests[test]["format_type"] = result.cases[test][
                    "format_type"]
            type = tests[test]["format_type"]
            metric = result.cases[test].get("metric")
            data = result.cases[test].get("data")
            res = result.cases[test].get("result")
            if isinstance(metric, dict):
                tests[test].setdefault("history", dict())
                for param in metric:
                    tests[test]["history"].setdefault(param, list()). \
                        append({
                            "branch": branch,
                            "commit": commit,
                            "metric": metric.get(param),
                            "data": format_data(data.get(param)),
                            "result": res.get(param)})
            else:
                tests[test].setdefault("history", list()).append({
                    "branch": branch,
                    "commit": commit,
                    "metric": metric,
                    "data": format_data(data),
                    "result": res
                })
return base
def collect_history(self)
Build the structure of results in terms of a commit history.
3.725594
3.647769
1.021335
base = dict() meta = base.setdefault('meta', dict()) tests = base.setdefault('tests', dict()) score = base.setdefault('score', dict()) for model_filename, result in iteritems(diff_results): if meta == dict(): meta = result["meta"] for test_id, test_results in iteritems(result["tests"]): tests.setdefault(test_id, dict()) if tests[test_id] == dict(): tests[test_id]["summary"] = test_results["summary"] tests[test_id]["title"] = test_results["title"] tests[test_id]["format_type"] = test_results["format_type"] if isinstance(test_results["metric"], dict): tests[test_id].setdefault("diff", dict()) for param in test_results["metric"]: tests[test_id]["diff"].setdefault(param, list()). \ append({ "model": model_filename, "data": test_results["data"].setdefault(param), "duration": test_results["duration"].setdefault(param), "message": test_results["message"].setdefault(param), "metric": test_results["metric"].setdefault(param), "result": test_results["result"].setdefault(param)}) else: tests[test_id].setdefault("diff", list()) tests[test_id]["diff"].append({ "model": model_filename, "data": test_results.setdefault("data"), "duration": test_results.setdefault("duration"), "message": test_results.setdefault("message"), "metric": test_results.setdefault("metric"), "result": test_results.setdefault("result")}) self.result = result self.compute_score() score.setdefault('total_score', dict()).setdefault('diff', list()) score.setdefault('sections', dict()).setdefault('diff', list()) score['total_score']['diff'].append({ "model": model_filename, "total_score": self.result['score']['total_score']}) for section in self.result['score']['sections']: section.update({"model": model_filename}) score['sections']['diff'].append(section) return base
def format_and_score_diff_data(self, diff_results)
Reformat the api results to work with the front-end.
2.274915
2.256858
1.008001
# Reduce the whole database to targets of interest.
xref = mnx_db.loc[mnx_db["MNX_ID"].isin(shortlist["MNX_ID"]), :]
# Drop deprecated MetaNetX identifiers. Disabled for now.
# xref = xref.loc[~xref["XREF"].str.startswith("deprecated", na=False), :]
# Drop self-references for now since they don't follow the format.
xref = xref.loc[xref["XREF"] != xref["MNX_ID"], :]
# Split namespaces from identifiers.
xref[["XREF_ID", "XREF"]] = xref["XREF"].str.split(":", n=1, expand=True)
# Group the data in the xref dataframe so that one MNX ID maps to all
# corresponding cross-references from other databases. Then list all
# identifiers that belong to these databases:
# MNX_ID  XREF_ID
# MNXM0   chebi    [23367, 59999]
#         metacyc  [UNKNOWN]
# Make a separate column for every XREF_ID:
# MNX_ID  chebi           metacyc
# MNXM0   [23367, 59999]  [UNKNOWN]
xref = xref.groupby(["MNX_ID", "XREF_ID"], as_index=False, sort=False)[
    "XREF"].apply(list).unstack('XREF_ID')
# Re-insert MetaNetX identifiers as lists.
# FIXME: Shouldn't we use metanetx.chemical here instead of 'mnx'?
xref["mnx"] = [[x] for x in xref.index]
# Transpose the data frame such that the index are now xref databases and
# the column names are MetaNetX identifiers.
return xref.T
def generate_shortlist(mnx_db, shortlist)
Create a condensed cross-references format from data in long form. Both data frames must contain a column 'MNX_ID' and the dump is assumed to also have a column 'XREF'. Parameters ---------- mnx_db : pandas.DataFrame The entire MetaNetX dump as a data frame. shortlist : pandas.DataFrame The shortlist of targets as a data frame. Returns ------- pandas.DataFrame A condensed format with MetaNetX identifiers as the column index and database identifiers as the row index. Elements are lists and often have multiple entries.
5.334105
4.965311
1.074274
LOGGER.info("Read shortlist.") targets = pd.read_table(join(dirname(__file__), "shortlist.tsv")) if not exists(mnx_dump): # Download the MetaNetX chemicals dump if it doesn't exists. # Download done as per https://stackoverflow.com/a/16696317. LOGGER.info("MetaNetX dump '%s' does not exist. Downloading...", mnx_dump) with open(mnx_dump, "wb") as file_handle, \ get("https://www.metanetx.org/cgi-bin/mnxget/mnxref/chem_xref.tsv", stream=True) as stream: for chunk in stream.iter_content(chunk_size=1024): file_handle.write(chunk) LOGGER.info("Done.") LOGGER.info("Read the MetaNetX dump with cross-references.") db = pd.read_table(mnx_dump, comment='#', names=['XREF', 'MNX_ID', 'Evidence', 'Description']) LOGGER.info("Generate the shortlist cross-references.") res = generate_shortlist(db, targets) LOGGER.info("Save result.") res.to_json(join(dirname(__file__), pardir, "memote", "support", "data", "met_id_shortlist.json"), force_ascii=False)
def generate(mnx_dump)
Annotate a shortlist of metabolites with cross-references using MetaNetX. MNX_DUMP : The chemicals dump from MetaNetX usually called 'chem_xref.tsv'. Will be downloaded if it doesn't exist.
4.360621
3.644452
1.19651
custom = [ check_partial(gene_id_check, frozenset(g.id for g in model.genes)) ] super(EssentialityExperiment, self).validate( model=model, checks=checks + custom)
def validate(self, model, checks=[])
Use a defined schema to validate the medium table format.
11.618004
11.771293
0.986978
with model: if self.medium is not None: self.medium.apply(model) if self.objective is not None: model.objective = self.objective model.add_cons_vars(self.constraints) max_val = model.slim_optimize() essen = single_gene_deletion( model, gene_list=self.data["gene"], processes=1) essen["gene"] = [list(g)[0] for g in essen.index] essen.index = essen["gene"] essen["essential"] = (essen["growth"] < (max_val * 0.1)) \ | essen["growth"].isna() return essen
def evaluate(self, model)
Use the defined parameters to predict single gene essentiality.
6.067749
5.480217
1.10721
def decorator(func): registry[func.__name__] = func return func return decorator
def register_with(registry)
Register a passed in object.

Intended to be used as a decorator on model building functions with a
``dict`` as a registry.

Examples
--------
.. code-block:: python

    REGISTRY = dict()

    @register_with(REGISTRY)
    def build_empty(base):
        return base
3.146045
7.13702
0.440807
if format_type not in TYPES: raise ValueError( "Invalid type. Expected one of: {}.".format(", ".join(TYPES))) def decorator(func): func.annotation = dict( title=title, summary=extended_summary(func), message=message, data=data, format_type=format_type, metric=metric) return func return decorator
def annotate(title, format_type, message=None, data=None, metric=1.0)
Annotate a test case with info that should be displayed in the reports. Parameters ---------- title : str A human-readable descriptive title of the test case. format_type : str A string that determines how the result data is formatted in the report. It is expected not to be None. * 'number' : 'data' is a single number which can be an integer or float and should be represented as such. * 'count' : 'data' is a list, set or tuple. Choosing 'count' will display the length of that list e.g. number of metabolites without formula. * 'percent' : Instead of 'data' the content of 'metric' ought to be displayed e.g. percentage of metabolites without charge. 'metric' is expected to be a floating point number. * 'raw' : 'data' ought to be displayed "as is" without formatting. This option is appropriate for single strings or a boolean output. message : str A short written explanation that states and possibly explains the test result. data Raw data which the test case generates and assesses. Can be of the following types: list, set, tuple, string, float, integer, and boolean. metric : float A value x in the range 0 <= x <= 1 which represents the fraction of 'data' relative to the total in the model. For example, if 'data' are all metabolites without formula, 'metric' should be the fraction of metabolites without formula out of the total number of metabolites in the model. Returns ------- function The decorated function, now extended by the attribute 'annotation'. Notes ----- Adds an "annotation" attribute to the function object, which stores values for predefined keys as a dictionary.
3.388693
3.617796
0.936673
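A sketch of how a decorated test case might look, loosely following the pattern the docstring describes; the test name and body are illustrative, not taken from the source.

.. code-block:: python

    @annotate(title="Metabolites without Formula", format_type="count")
    def test_metabolites_without_formula(model):
        # The decorator attaches an `annotation` dict to the function.
        ann = test_metabolites_without_formula.annotation
        ann["data"] = [met.id for met in model.metabolites
                       if not met.formula]
        ann["metric"] = len(ann["data"]) / len(model.metabolites)
        ann["message"] = "{} metabolites lack a formula.".format(
            len(ann["data"]))
        assert len(ann["data"]) == 0, ann["message"]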
if len(sequence) > LIST_SLICE: return ", ".join(sequence[:LIST_SLICE] + ["..."]) else: return ", ".join(sequence)
def truncate(sequence)
Create a potentially shortened text display of a list. Parameters ---------- sequence : list An indexable sequence of elements. Returns ------- str The list as a formatted string.
3.346365
3.734546
0.896057
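A worked example, assuming the module-level constant `LIST_SLICE` is set to 5.

.. code-block:: python

    LIST_SLICE = 5  # assumed value of the module constant

    truncate(["a", "b", "c"])              # -> 'a, b, c'
    truncate([str(i) for i in range(10)])  # -> '0, 1, 2, 3, 4, ...'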
keys_to_explore = list(obj) while len(keys_to_explore) > 0: key = keys_to_explore.pop() if not isinstance(key, str): LOGGER.info(type(key)) value = obj[key] if isinstance(value, dict): LOGGER.info("%s:", key) log_json_incompatible_types(value) elif not isinstance(value, JSON_TYPES): LOGGER.info("%s: %s", key, type(value)) elif isinstance(value, (int, float)) and not isfinite(value): LOGGER.info("%s: %f", key, value)
def log_json_incompatible_types(obj)
Log types that are not JSON compatible. Explore a nested dictionary structure and log types that are not JSON compatible. Parameters ---------- obj : dict A potentially nested dictionary.
2.511616
2.607453
0.963245
if pretty: params = dict(sort_keys=True, indent=2, allow_nan=False, separators=(",", ": "), ensure_ascii=False) else: params = dict(sort_keys=False, indent=None, allow_nan=False, separators=(",", ":"), ensure_ascii=False) try: return json.dumps(obj, **params) except (TypeError, ValueError) as error: LOGGER.critical( "The memote result structure is incompatible with the JSON " "standard.") log_json_incompatible_types(obj) raise_with_traceback(error)
def jsonify(obj, pretty=False)
Turn a nested object into a (compressed) JSON string. Parameters ---------- obj : dict Any kind of dictionary structure. pretty : bool, optional Whether to format the resulting JSON in a more legible way ( default False).
3.416768
3.456017
0.988643
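A short sketch of the two output modes; the dictionary is a stand-in, not a real MemoteResult.

.. code-block:: python

    obj = {"score": {"total_score": 0.8}, "tests": {}}

    jsonify(obj)               # -> '{"score":{"total_score":0.8},"tests":{}}'
    jsonify(obj, pretty=True)  # same data with sorted keys, 2-space indent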
flat_list = [] for sublist in list_of_lists: if isinstance(sublist, string_types) or isinstance(sublist, int): flat_list.append(sublist) elif sublist is None: continue elif not isinstance(sublist, string_types) and len(sublist) == 1: flat_list.append(sublist[0]) else: flat_list.append(tuple(sublist)) return flat_list
def flatten(list_of_lists)
Flatten a list of lists but maintain strings and ints as entries.
1.940279
1.804353
1.075332
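A worked example of the flattening rules: strings and ints pass through, `None` is dropped, single-element lists are unwrapped, and longer sub-lists become tuples.

.. code-block:: python

    flatten([["a"], None, "bc", 7, ["d", "e"]])
    # -> ['a', 'bc', 7, ('d', 'e')]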
for error in notifications["errors"]: LOGGER.error(error) for warn in notifications["warnings"]: LOGGER.warning(warn)
def stdout_notifications(notifications)
Print each entry of errors and warnings to stdout. Parameters ---------- notifications: dict A simple dictionary structure containing a list of errors and warnings.
3.722474
3.652075
1.019276
self.data = read_tabular(self.filename, dtype_conversion) with open_text(memote.experimental.schemata, self.SCHEMA, encoding="utf-8") as file_handle: self.schema = json.load(file_handle)
def load(self, dtype_conversion=None)
Load the data table and corresponding validation schema. Parameters ---------- dtype_conversion : dict Column names as keys and corresponding type for loading the data. Please take a look at the `pandas documentation <https://pandas.pydata.org/pandas-docs/stable/io.html#specifying-column-data-types>`__ for detailed explanations.
8.81836
9.932569
0.887823
records = self.data.to_dict("records") self.evaluate_report( validate(records, headers=list(records[0]), preset='table', schema=self.schema, order_fields=True, custom_checks=checks))
def validate(self, model, checks=[])
Use a defined schema to validate the given table.
13.498739
11.505346
1.173258
if report["valid"]: return for warn in report["warnings"]: LOGGER.warning(warn) # We only ever test one table at a time. for err in report["tables"][0]["errors"]: LOGGER.error(err["message"]) raise ValueError("Invalid data file. Please see errors above.")
def evaluate_report(report)
Iterate over validation errors.
7.601578
6.596705
1.152329
constraints = [] for rxn in reactions: expression = add( [c * model.variables[m.id] for m, c in rxn.metabolites.items()]) constraints.append(Constraint(expression, lb=0, ub=0, name=rxn.id)) model.add(constraints)
def add_reaction_constraints(model, reactions, Constraint)
Add the stoichiometric coefficients as constraints. Parameters ---------- model : optlang.Model The transposed stoichiometric matrix representation. reactions : iterable Container of `cobra.Reaction` instances. Constraint : optlang.Constraint The constraint class for the specific interface.
3.544482
3.963964
0.894176
matrix = np.zeros((len(metabolites), len(reactions))) met_index = dict((met, i) for i, met in enumerate(metabolites)) rxn_index = dict() for i, rxn in enumerate(reactions): rxn_index[rxn] = i for met, coef in iteritems(rxn.metabolites): j = met_index[met] matrix[j, i] = coef return matrix, met_index, rxn_index
def stoichiometry_matrix(metabolites, reactions)
Return the stoichiometry matrix representation of a set of reactions. The reactions and metabolites order is respected. All metabolites are expected to be contained and complete in terms of the reactions. Parameters ---------- reactions : iterable A somehow ordered list of unique reactions. metabolites : iterable A somehow ordered list of unique metabolites. Returns ------- numpy.array The 2D array that represents the stoichiometry matrix. dict A dictionary mapping metabolites to row indexes. dict A dictionary mapping reactions to column indexes.
1.968868
2.144646
0.918038
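A minimal sketch with a two-metabolite toy reaction built directly in cobrapy; the identifiers are made up.

.. code-block:: python

    from cobra import Metabolite, Reaction

    a = Metabolite("A")
    b = Metabolite("B")
    rxn = Reaction("R1")
    rxn.add_metabolites({a: -1, b: 1})

    matrix, met_index, rxn_index = stoichiometry_matrix([a, b], [rxn])
    # matrix -> [[-1.], [1.]]; met_index -> {a: 0, b: 1}; rxn_index -> {rxn: 0}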
matrix = np.atleast_2d(matrix) sigma = svd(matrix, compute_uv=False) tol = max(atol, rtol * sigma[0]) return int((sigma >= tol).sum())
def rank(matrix, atol=1e-13, rtol=0)
Estimate the rank, i.e., the dimension of the column space, of a matrix. The algorithm used by this function is based on the singular value decomposition of `stoichiometry_matrix`. Parameters ---------- matrix : ndarray The matrix should be at most 2-D. A 1-D array with length k will be treated as a 2-D with shape (1, k) atol : float The absolute tolerance for a zero singular value. Singular values smaller than ``atol`` are considered to be zero. rtol : float The relative tolerance for a zero singular value. Singular values less than the relative tolerance times the largest singular value are considered to be zero. Notes ----- If both `atol` and `rtol` are positive, the combined tolerance is the maximum of the two; that is:: tol = max(atol, rtol * smax) Singular values smaller than ``tol`` are considered to be zero. Returns ------- int The estimated rank of the matrix. See Also -------- numpy.linalg.matrix_rank matrix_rank is basically the same as this function, but it does not provide the option of the absolute tolerance.
3.581346
3.917831
0.914114
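A small worked example with numpy.

.. code-block:: python

    import numpy as np

    m = np.array([[1.0, 2.0],
                  [2.0, 4.0],
                  [0.0, 1.0]])
    rank(m)  # -> 2, i.e. both columns are linearly independent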
def nullspace(matrix, atol=1e-13, rtol=0.0):  # noqa: D402
    matrix = np.atleast_2d(matrix)
    _, sigma, vh = svd(matrix)
    tol = max(atol, rtol * sigma[0])
    num_nonzero = (sigma >= tol).sum()
    return vh[num_nonzero:].conj().T
Compute an approximate basis for the null space (kernel) of a matrix. The algorithm used by this function is based on the singular value decomposition of the given matrix. Parameters ---------- matrix : ndarray The matrix should be at most 2-D. A 1-D array with length k will be treated as a 2-D with shape (1, k) atol : float The absolute tolerance for a zero singular value. Singular values smaller than ``atol`` are considered to be zero. rtol : float The relative tolerance for a zero singular value. Singular values less than the relative tolerance times the largest singular value are considered to be zero. Notes ----- If both `atol` and `rtol` are positive, the combined tolerance is the maximum of the two; that is:: tol = max(atol, rtol * smax) Singular values smaller than ``tol`` are considered to be zero. Returns ------- ndarray If ``matrix`` is an array with shape (m, k), then the returned nullspace will be an array with shape ``(k, n)``, where n is the estimated dimension of the nullspace. References ---------- Adapted from: https://scipy.github.io/old-wiki/pages/Cookbook/RankNullspace.html
null
null
null
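A quick check that the returned basis really spans the kernel of the input matrix.

.. code-block:: python

    import numpy as np

    s = np.array([[1.0, -1.0, 0.0],
                  [0.0, 1.0, -1.0]])
    n = nullspace(s)  # shape (3, 1), spanned by (1, 1, 1) / sqrt(3)
    assert np.allclose(s.dot(n), 0.0)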
return ( model.solver.interface.Model, model.solver.interface.Constraint, model.solver.interface.Variable, model.solver.interface.Objective )
def get_interface(model)
Return the interface specific classes. Parameters ---------- model : cobra.Model The metabolic model under investigation.
4.175203
3.968763
1.052016
biomass = set(find_biomass_reaction(model)) if len(biomass) == 0: LOGGER.warning("No biomass reaction detected. Consistency test results " "are unreliable if one exists.") return set(model.reactions) - (set(model.boundary) | biomass)
def get_internals(model)
Return non-boundary reactions and their metabolites. Boundary reactions are unbalanced by their nature. They are excluded here and only the metabolites of the others are considered. Parameters ---------- model : cobra.Model The metabolic model under investigation.
6.56553
6.470509
1.014685
assert len(metabolites) == kernel.shape[0], \
    "metabolite vector and first nullspace dimension must be equal"
ns_problem = Model()
k_vars = list()
for met in metabolites:
    # The element y[i] of the mass vector.
    y_var = Variable(met.id)
    k_var = Variable("k_{}".format(met.id), type="binary")
    k_vars.append(k_var)
    ns_problem.add([y_var, k_var])
    # This constraint is equivalent to 0 <= y[i] <= k[i].
    ns_problem.add(Constraint(
        y_var - k_var, ub=0, name="switch_{}".format(met.id)))
ns_problem.update()
# Add the nullspace constraints.
for (j, column) in enumerate(kernel.T):
    expression = sympy.Add(
        *[coef * ns_problem.variables[met.id]
          for (met, coef) in zip(metabolites, column) if coef != 0.0])
    constraint = Constraint(expression, lb=0, ub=0, name="ns_{}".format(j))
    ns_problem.add(constraint)
# The objective is to minimize the binary indicators k[i], subject to
# the above inequality constraints.
ns_problem.objective = Objective(1)
ns_problem.objective.set_linear_coefficients(
    {k_var: 1. for k_var in k_vars})
ns_problem.objective.direction = "min"
return ns_problem, k_vars
def create_milp_problem(kernel, metabolites, Model, Variable, Constraint, Objective)
Create the MILP as defined by equation (13) in [1]_. Parameters ---------- kernel : numpy.array A 2-dimensional array that represents the left nullspace of the stoichiometric matrix which is the nullspace of the transpose of the stoichiometric matrix. metabolites : iterable The metabolites in the nullspace. The length of this vector must equal the first dimension of the nullspace. Model : optlang.Model Model class for a specific optlang interface. Variable : optlang.Variable Variable class for a specific optlang interface. Constraint : optlang.Constraint Constraint class for a specific optlang interface. Objective : optlang.Objective Objective class for a specific optlang interface. References ---------- .. [1] Gevorgyan, A., M. G Poolman, and D. A Fell. "Detection of Stoichiometric Inconsistencies in Biomolecular Models." Bioinformatics 24, no. 19 (2008): 2245.
3.488011
3.428319
1.017411
cut = Constraint(sympy.Add(*indicators), ub=bound) problem.add(cut) return cut
def add_cut(problem, indicators, bound, Constraint)
Add an integer cut to the problem. Ensure that the same solution involving these indicator variables cannot be found by enforcing their sum to be less than before. Parameters ---------- problem : optlang.Model Specific optlang interface Model instance. indicators : iterable Binary indicator `optlang.Variable`s. bound : int Should be one less than the sum of indicators. Corresponds to P - 1 in equation (14) in [1]_. Constraint : optlang.Constraint Constraint class for a specific optlang interface. References ---------- .. [1] Gevorgyan, A., M. G Poolman, and D. A Fell. "Detection of Stoichiometric Inconsistencies in Biomolecular Models." Bioinformatics 24, no. 19 (2008): 2245.
6.559551
13.439859
0.488067
balance = defaultdict(int) for metabolite, coefficient in iteritems(reaction.metabolites): if metabolite.elements is None or len(metabolite.elements) == 0: return False for element, amount in iteritems(metabolite.elements): balance[element] += coefficient * amount return all(amount == 0 for amount in itervalues(balance))
def is_mass_balanced(reaction)
Confirm that a reaction is mass balanced.
2.617828
2.520517
1.038608
charge = 0 for metabolite, coefficient in iteritems(reaction.metabolites): if metabolite.charge is None: return False charge += coefficient * metabolite.charge return charge == 0
def is_charge_balanced(reaction)
Confirm that a reaction is charge balanced.
2.652765
2.569924
1.032235
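A hedged sketch exercising both balance checks on a toy water dissociation reaction; the identifiers are made up.

.. code-block:: python

    from cobra import Metabolite, Reaction

    h2o = Metabolite("h2o_c", formula="H2O", charge=0)
    h = Metabolite("h_c", formula="H", charge=1)
    oh = Metabolite("oh_c", formula="OH", charge=-1)
    rxn = Reaction("DISSOC")
    rxn.add_metabolites({h2o: -1, h: 1, oh: 1})

    is_mass_balanced(rxn)    # -> True: H and O atoms cancel out
    is_charge_balanced(rxn)  # -> True: (-1)*0 + 1 + (-1) == 0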
new_func = partial(func, *args, **kwargs) new_func.check = func.check return new_func
def check_partial(func, *args, **kwargs)
Create a partial to be used by goodtables.
2.958306
3.041656
0.972597
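A sketch of the intended use together with the identifier checks below, assuming `gene_id_check` carries a `.check` attribute from goodtables' check registration.

.. code-block:: python

    known_genes = frozenset(g.id for g in model.genes)
    custom = check_partial(gene_id_check, known_genes)
    # `custom` keeps the original `.check` marker, so it can be passed to
    # goodtables via `validate(..., custom_checks=[custom])`, as the
    # EssentialityExperiment.validate method above does.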
message = ("Gene '{value}' in column {col} and row {row} does not " "appear in the metabolic model.") for column in columns: if "gene" in column['header'] and column['value'] not in genes: message = message.format( value=column['value'], row=row_number, col=column['number']) errors.append({ 'code': 'bad-value', 'message': message, 'row-number': row_number, 'column-number': column['number'], })
def gene_id_check(genes, errors, columns, row_number)
Validate gene identifiers against a known set. Parameters ---------- genes : set The known set of gene identifiers. errors : Passed by goodtables. columns : Passed by goodtables. row_number : Passed by goodtables.
3.435549
3.671214
0.935807
message = ("Reaction '{value}' in column {col} and row {row} does not " "appear in the metabolic model.") for column in columns: if "reaction" in column['header'] and column['value'] not in reactions: message = message.format( value=column['value'], row=row_number, col=column['number']) errors.append({ 'code': 'bad-value', 'message': message, 'row-number': row_number, 'column-number': column['number'], })
def reaction_id_check(reactions, errors, columns, row_number)
Validate reactions identifiers against a known set. Parameters ---------- reactions : set The known set of reaction identifiers. errors : Passed by goodtables. columns : Passed by goodtables. row_number : Passed by goodtables.
3.28859
3.501697
0.939142
message = ("Metabolite '{value}' in column {col} and row {row} does not " "appear in the metabolic model.") for column in columns: if "metabolite" in column['header'] and \ column['value'] not in metabolites: message = message.format( value=column['value'], row=row_number, col=column['number']) errors.append({ 'code': 'bad-value', 'message': message, 'row-number': row_number, 'column-number': column['number'], })
def metabolite_id_check(metabolites, errors, columns, row_number)
Validate metabolite identifiers against a known set. Parameters ---------- metabolites : set The known set of metabolite identifiers. errors : Passed by goodtables. columns : Passed by goodtables. row_number : Passed by goodtables.
3.041015
3.28348
0.926156
def is_verbose(arg):
    return (arg.startswith("--verbosity") or arg.startswith("-v") or
            arg.startswith("--verbose") or arg.startswith("-q") or
            arg.startswith("--quiet"))

if ignore_git:
    repo = None
else:
    callbacks.git_installed()
    repo = callbacks.probe_git()
if collect:
    if repo is not None:
        if location is None:
            LOGGER.critical(
                "Working with a repository requires a storage location.")
            sys.exit(1)
if not any(a.startswith("--tb") for a in pytest_args):
    pytest_args = ["--tb", "short"] + pytest_args
if not any(is_verbose(a) for a in pytest_args):
    pytest_args.append("-vv")
# Check if the model was changed in this commit. Exit `memote run` if this
# was not the case.
if skip_unchanged and repo is not None:
    commit = repo.head.commit
    if not is_modified(model, commit):
        LOGGER.info("The model was not modified in commit '%s'. Skipping.",
                    commit.hexsha)
        sys.exit(0)
# Add further directories to search for tests.
pytest_args.extend(custom_tests)
# Check if the model can be loaded at all.
model, sbml_ver, notifications = api.validate_model(model)
if model is None:
    LOGGER.critical(
        "The model could not be loaded due to the following SBML errors.")
    stdout_notifications(notifications)
    sys.exit(1)
model.solver = solver
# Load the experimental configuration using model information.
if experimental is not None:
    experimental.load(model)
code, result = api.test_model(
    model=model, sbml_version=sbml_ver, results=True,
    pytest_args=pytest_args, skip=skip, exclusive=exclusive,
    experimental=experimental)
if collect:
    if repo is None:
        manager = ResultManager()
        manager.store(result, filename=filename)
    else:
        LOGGER.info("Checking out deployment branch.")
        # If the repo HEAD is pointing to the most recent branch then
        # GitPython's `repo.active_branch` works. Yet, if the repo is in
        # detached HEAD state, i.e., when a user has checked out a specific
        # commit as opposed to a branch, this won't work and throw a
        # `TypeError`, which we are circumventing below.
        try:
            previous = repo.active_branch
            previous_cmt = previous.commit
            is_branch = True
        except TypeError:
            previous_cmt = repo.head.commit
            is_branch = False
        repo.git.checkout(deployment)
        try:
            manager = SQLResultManager(repository=repo, location=location)
        except (AttributeError, ArgumentError):
            manager = RepoResultManager(repository=repo, location=location)
        LOGGER.info(
            "Committing result and changing back to working branch.")
        manager.store(result, commit=previous_cmt.hexsha)
        repo.git.add(".")
        check_call(
            ['git', 'commit', '-m',
             "chore: add result for {}".format(previous_cmt.hexsha)]
        )
        if is_branch:
            previous.checkout()
        else:
            repo.commit(previous_cmt)
def run(model, collect, filename, location, ignore_git, pytest_args, exclusive, skip, solver, experimental, custom_tests, deployment, skip_unchanged)
Run the test suite on a single model and collect results. MODEL: Path to model file. Can also be supplied via the environment variable MEMOTE_MODEL or configured in 'setup.cfg' or 'memote.ini'.
4.394983
4.344015
1.011733
callbacks.git_installed() if directory is None: directory = os.getcwd() cookiecutter("gh:opencobra/cookiecutter-memote", output_dir=directory, replay=replay)
def new(directory, replay)
Create a suitable model repository structure from a template. By using a cookiecutter template, memote will ask you a couple of questions and set up a new directory structure that will make your life easier. The new directory will be placed in the current directory or respect the given --directory option.
15.899298
10.347894
1.536477
callbacks.git_installed()
try:
    repo = git.Repo()
except git.InvalidGitRepositoryError:
    LOGGER.critical(
        "'memote online' requires a git repository in order to follow "
        "the current branch's commit history.")
    sys.exit(1)
if note == "memote-ci access":
    note = "{} to {}".format(note, github_repository)
# Github API calls
# Set up the git repository on GitHub via API v3.
gh_repo_name, auth_token, repo_access_token = _setup_gh_repo(
    github_repository, github_username, note
)
# Travis API calls
# Configure Travis CI to use Github auth token then return encrypted token.
secret = _setup_travis_ci(gh_repo_name, auth_token, repo_access_token)
# Save the encrypted token in the travis config then commit and push.
LOGGER.info("Storing GitHub token in '.travis.yml'.")
config = te.load_travis_configuration(".travis.yml")
global_env = config.setdefault("env", {}).get("global")
if global_env is None:
    config["env"]["global"] = global_env = {}
try:
    global_env["secure"] = secret
except TypeError:
    global_env.append({"secure": secret})
te.dump_travis_configuration(config, ".travis.yml")
LOGGER.info("Add, commit and push changes to '.travis.yml' to GitHub.")
repo.index.add([".travis.yml"])
check_call(
    ['git', 'commit', '-m', "chore: add encrypted GitHub access token"]
)
check_call(
    ['git', 'push', '--set-upstream', 'origin', repo.active_branch.name]
)
def online(note, github_repository, github_username)
Upload the repository to GitHub and enable testing on Travis CI.
4.944616
4.911711
1.006699
target_file = os.path.abspath( join("tests", "data", "memote-mock-repo.tar.gz") ) temp_dir = mkdtemp(prefix='tmp_mock') previous_wd = os.getcwd() try: LOGGER.info("Cloning repository.") os.chdir(temp_dir) check_output( ['git', 'clone', 'https://github.com/ChristianLieven/memote-mock-repo.git'] ) os.chdir('memote-mock-repo/') LOGGER.info("Setting git to ignore filemode changes.") call( ['git', 'config', 'core.fileMode', 'false'] ) call( ['git', 'config', 'user.email', '[email protected]'] ) call( ['git', 'config', 'user.name', 'memote-bot'] ) finally: LOGGER.info("Compressing to tarball.") tar = tarfile.open(target_file, "w:gz") tar.add( join(temp_dir, 'memote-mock-repo/'), arcname="memote-mock-repo" ) tar.close() LOGGER.info("Success!") LOGGER.info("Removing temporary directory.") rmtree(temp_dir) LOGGER.info("Success! The mock repo has been updated.") os.chdir(previous_wd)
def update_mock_repo()
Clone and gzip the memote-mock-repo used for CLI and integration tests. The repo is hosted at 'https://github.com/ChristianLieven/memote-mock-repo.git' and maintained separately from
3.026643
2.748151
1.101338
return sum(-coef * met.formula_weight for (met, coef) in iteritems(reaction.metabolites)) / 1000.0
def sum_biomass_weight(reaction)
Compute the sum of all reaction compounds. This function expects all metabolites of the biomass reaction to have formula information assigned. Parameters ---------- reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. Returns ------- float The molecular weight of the biomass reaction in units of g/mmol.
8.564077
8.341884
1.026636
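A usage sketch on cobrapy's bundled textbook model (the reaction identifier below is the one used in that model); by a common convention a well-formulated biomass reaction should weigh close to 1 g/mmol.

.. code-block:: python

    import cobra

    model = cobra.io.load_model("textbook")
    rxn = model.reactions.get_by_id("Biomass_Ecoli_core")
    # Requires formula annotations on all biomass precursors.
    print(sum_biomass_weight(rxn))  # expected to be close to 1 g/mmol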
id_of_main_compartment = helpers.find_compartment_id_in_model(model, 'c') gam_reactants = set() try: gam_reactants.update([ helpers.find_met_in_model( model, "MNXM3", id_of_main_compartment)[0]]) except RuntimeError: pass try: gam_reactants.update([ helpers.find_met_in_model( model, "MNXM2", id_of_main_compartment)[0]]) except RuntimeError: pass biomass_precursors = set(reaction.reactants) - gam_reactants return list(biomass_precursors)
def find_biomass_precursors(model, reaction)
Return a list of all biomass precursors excluding ATP and H2O. Parameters ---------- reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. model : cobra.Model The metabolic model under investigation. Returns ------- list Metabolite objects that are reactants of the biomass reaction excluding ATP and H2O.
2.968097
3.203453
0.926531
LOGGER.debug("Finding blocked biomass precursors") precursors = find_biomass_precursors(model, reaction) blocked_precursors = list() _, ub = helpers.find_bounds(model) for precursor in precursors: with model: dm_rxn = model.add_boundary( precursor, type="safe-demand", reaction_id="safe_demand", lb=0, ub=ub ) flux = helpers.run_fba(model, dm_rxn.id, direction='max') if np.isnan(flux) or abs(flux) < 1E-08: blocked_precursors.append(precursor) return blocked_precursors
def find_blocked_biomass_precursors(reaction, model)
Return a list of all biomass precursors that cannot be produced. Parameters ---------- reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. model : cobra.Model The metabolic model under investigation. Returns ------- list Metabolite objects that are reactants of the biomass reaction excluding ATP and H2O that cannot be produced by flux balance analysis.
4.001544
4.068504
0.983542
id_of_main_compartment = helpers.find_compartment_id_in_model(model, 'c') try: left = { helpers.find_met_in_model( model, "MNXM3", id_of_main_compartment)[0], helpers.find_met_in_model( model, "MNXM2", id_of_main_compartment)[0] } right = { helpers.find_met_in_model( model, "MNXM7", id_of_main_compartment)[0], helpers.find_met_in_model( model, "MNXM1", id_of_main_compartment)[0], helpers.find_met_in_model( model, "MNXM9", id_of_main_compartment)[0] } except RuntimeError: return False return ( left.issubset(set(reaction.reactants)) and right.issubset(set(reaction.products)))
def gam_in_biomass(model, reaction)
Return whether the biomass reaction includes growth-associated maintenance. Parameters ---------- model : cobra.Model The metabolic model under investigation. reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. Returns ------- boolean True if the biomass reaction includes ATP and H2O as reactants and ADP, Pi and H as products, False otherwise.
2.1939
2.270544
0.966244
biomass_rxns = set(helpers.find_biomass_reaction(model)) tra_bou_bio_rxns = helpers.find_interchange_biomass_reactions( model, biomass_rxns) try: precursors = find_biomass_precursors(model, reaction) main_comp = helpers.find_compartment_id_in_model(model, 'c') ext_space = helpers.find_compartment_id_in_model(model, 'e') except KeyError: LOGGER.error("Failed to properly identify cytosolic and extracellular " "compartments.") raise_with_traceback(KeyError("The cytosolic and/or extracellular " "compartments could not be identified.")) except RuntimeError: LOGGER.error("Failed to properly identify cytosolic and extracellular " "compartments.") raise_with_traceback(RuntimeError("The cytosolic and/or extracellular " "compartments could not be " "identified.")) else: tra_bou_bio_mets = [met for met in precursors if met.reactions.issubset(tra_bou_bio_rxns)] rxns_of_interest = set([rxn for met in tra_bou_bio_mets for rxn in met.reactions if rxn not in biomass_rxns]) solution = model.optimize(raise_error=True) if np.isclose(solution.objective_value, 0, atol=tolerance): LOGGER.error("Failed to generate a non-zero objective value with " "flux balance analysis.") raise OptimizationError( "The flux balance analysis on this model returned an " "objective value of zero. Make sure the model can " "grow! Check if the constraints are not too strict!") tra_bou_bio_fluxes = {r: solution[r.id] for r in rxns_of_interest} met_flux_sum = {m: 0 for m in tra_bou_bio_mets} return detect_false_positive_direct_metabolites( tra_bou_bio_mets, biomass_rxns, main_comp, ext_space, tra_bou_bio_fluxes, met_flux_sum)
def find_direct_metabolites(model, reaction, tolerance=1E-06)
Return list of possible direct biomass precursor metabolites. The term direct metabolites describes metabolites that are involved only in either transport and/or boundary reactions, AND the biomass reaction(s), but not in any purely metabolic reactions. Parameters ---------- model : cobra.Model The metabolic model under investigation. reaction : cobra.Reaction The biomass reaction of the model under investigation. tolerance : float, optional Tolerance below which values will be regarded as zero. Returns ------- list Metabolites that qualify as direct metabolites i.e. biomass precursors that are taken up to be consumed by the biomass reaction only.
3.305156
3.280117
1.007633
for met in candidates:
    is_internal = met.compartment != extra
    for rxn in met.reactions:
        if rxn in biomass_reactions:
            continue
        # Internal metabolites can not be false positives.
        if is_internal:
            metabolite_fluxes[met] += abs(reaction_fluxes[rxn])
            continue
        # If the metabolite is in the "e" compartment and a reactant,
        # sum the fluxes accordingly (outward=negative, inward=positive).
        if met in rxn.reactants:
            product_comps = set([p.compartment for p in rxn.products])
            # If the reaction has no product (outward flux).
            if len(product_comps) == 0:
                metabolite_fluxes[met] += -reaction_fluxes[rxn]
            # If the reaction has a product in "c" (inward flux).
            elif cytosol in product_comps:
                metabolite_fluxes[met] += reaction_fluxes[rxn]
        # If the metabolite is in the "e" compartment and a product,
        # sum the fluxes accordingly (outward=negative, inward=positive).
        elif met in rxn.products:
            reactant_comps = set([p.compartment for p in rxn.reactants])
            # If the reaction has no reactant (inward flux).
            if len(reactant_comps) == 0:
                metabolite_fluxes[met] += reaction_fluxes[rxn]
            # If the reaction has a reactant in "c" (outward flux).
            elif cytosol in reactant_comps:
                metabolite_fluxes[met] += -reaction_fluxes[rxn]
return [m for m, f in iteritems(metabolite_fluxes) if f > 0]
def detect_false_positive_direct_metabolites( candidates, biomass_reactions, cytosol, extra, reaction_fluxes, metabolite_fluxes)
Weed out false positive direct metabolites. False positives exist in the extracellular compartment with flux from the cytosolic compartment and are part of the biomass reaction(s). This function sums fluxes positively or negatively depending on whether direct metabolites in the extracellular compartment are defined as reactants or products in various reactions. Parameters ---------- candidates : list of cobra.Metabolite Candidate direct metabolites. biomass_reactions : set of cobra.Reaction The biomass reactions. Usually one or two. cytosol : str The identifier of the cytosolic compartment. extra : str The identifier of the extracellular compartment. Returns ------- list Definitive list of direct metabolites, i.e., biomass precursors that are taken up to be consumed by the biomass reaction only.
2.131153
2.088913
1.020221
if len(reaction.metabolites) >= 16: return [reaction] id_of_main_compartment = helpers.find_compartment_id_in_model(model, 'c') gam_mets = ["MNXM3", "MNXM2", "MNXM7", "MNXM1", 'MNXM9'] try: gam = set([helpers.find_met_in_model( model, met, id_of_main_compartment)[0] for met in gam_mets]) except RuntimeError: gam = set() regex = re.compile('^{}(_[a-zA-Z]+?)*?$'.format('biomass'), re.IGNORECASE) biomass_metabolite = set(model.metabolites.query(regex)) macromolecules = set(reaction.metabolites) - gam - biomass_metabolite bundled_reactions = set() for met in macromolecules: bundled_reactions = bundled_reactions | set(met.reactions) return list(bundled_reactions)
def bundle_biomass_components(model, reaction)
Return bundled biomass component reactions if it is not one lumped reaction. There are two basic ways of specifying the biomass composition. The most common is a single lumped reaction containing all biomass precursors. Alternatively, the biomass equation can be split into several reactions each focusing on a different macromolecular component, for instance a (1 gDW ash) + b (1 gDW phospholipids) + c (free fatty acids) + d (1 gDW carbs) + e (1 gDW protein) + f (1 gDW RNA) + g (1 gDW DNA) + h (vitamins/cofactors) + xATP + xH2O -> 1 gDCW biomass + xADP + xH + xPi. This function aims to identify whether the given biomass reaction 'reaction' is a lumped all-in-one reaction, or whether it is just the final composing reaction of all macromolecular components. It is important to identify which other reactions belong to a given biomass reaction to be able to identify universal biomass components or calculate detailed precursor stoichiometries. Parameters ---------- model : cobra.Model The metabolic model under investigation. reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. Returns ------- list One or more reactions that together qualify as THE biomass equation. Notes ----- Counting H2O, ADP, Pi, H, and ATP, the number of metabolites in a split reaction is comparatively low: any reaction with 15 or fewer metabolites can probably be counted as a split reaction containing Ash, Phospholipids, Fatty Acids, Carbohydrates (i.e. cell wall components), Protein, RNA, DNA, Cofactors and Vitamins, and Small Molecules. Any reaction with 28 or more metabolites, however (21 AA + 3 Nucleotides (4-ATP) + 4 Deoxy-Nucleotides), can be considered a lumped reaction. Anything in between will be treated conservatively as a lumped reaction. For split reactions, after removing any of the metabolites associated with growth-associated energy expenditure (H2O, ADP, Pi, H, and ATP), the only remaining metabolites should be generalized macromolecule precursors e.g. Protein, Phospholipids etc. Each of these have their own composing reactions. Hence we include the reactions of these metabolites in the set that ultimately makes up the returned list of reactions that together make up the biomass equation.
3.768749
3.740791
1.007474
main_comp = helpers.find_compartment_id_in_model(model, 'c') biomass_eq = bundle_biomass_components(model, reaction) pooled_precursors = set( [met for rxn in biomass_eq for met in rxn.metabolites]) missing_essential_precursors = [] for mnx_id in ESSENTIAL_PRECURSOR_IDS: try: met = helpers.find_met_in_model(model, mnx_id, main_comp)[0] if met not in pooled_precursors: missing_essential_precursors.append(met.id) except RuntimeError: missing_essential_precursors.append(mnx_id) return missing_essential_precursors
def essential_precursors_not_in_biomass(model, reaction)
u""" Return a list of essential precursors missing from the biomass reaction. There are universal components of life that make up the biomass of all known organisms. These include all proteinogenic amino acids, deoxy- and ribonucleotides, water and a range of metabolic cofactors. Parameters ---------- model : cobra.Model The metabolic model under investigation. reaction : cobra.core.reaction.Reaction The biomass reaction of the model under investigation. Returns ------- list IDs of essential metabolites missing from the biomass reaction. The IDS will appear in the models namespace if the metabolite exists, but will be using the MetaNetX namespace if the metabolite does not exist in the model. Notes ----- "Answering the question of what to include in the core of a biomass objective function is not always straightforward. One example is different nucleotide forms, which, although inter-convertible, are essential for cellular chemistry. We propose here that all essential and irreplaceable molecules for metabolism should be included in the biomass functions of genome scale metabolic models. In the special case of cofactors, when two forms of the same cofactor take part in the same reactions (such as NAD and NADH), only one form could be included for the sake of simplicity. When a class of cofactors includes active and non-active interconvertible forms, the active forms should be preferred. [1]_." Please note, that [1]_ also suggest to count C1 carriers (derivatives of tetrahydrofolate(B9) or tetrahydromethanopterin) as universal cofactors. We have omitted these from this check because there are many individual compounds that classify as C1 carriers, and it is not clear a priori which one should be preferred. In a future update, we may consider identifying these using a chemical ontology. References ---------- .. [1] Xavier, J. C., Patil, K. R., & Rocha, I. (2017). Integration of Biomass Formulations of Genome-Scale Metabolic Models with Experimental Data Reveals Universally Essential Cofactors in Prokaryotes. Metabolic Engineering, 39(October 2016), 200–208. http://doi.org/10.1016/j.ymben.2016.12.002
3.510962
3.629886
0.967238
if value is None: return config = ExperimentConfiguration(value) config.validate() return config
def validate_experimental(context, param, value)
Load and validate an experimental data configuration.
6.999813
5.226177
1.339375
try: repo = git.Repo() except git.InvalidGitRepositoryError: LOGGER.warning( "We highly recommend keeping your model in a git repository." " It allows you to track changes and to easily collaborate with" " others via online platforms such as https://github.com.\n") return if repo.is_dirty(): LOGGER.critical( "Please git commit or git stash all changes before running" " the memote suite.") sys.exit(1) return repo
def probe_git()
Return a git repository instance if it exists.
7.02956
6.856704
1.02521
LOGGER.info("Checking `git` installation.") try: check_output(['git', '--version']) except CalledProcessError as e: LOGGER.critical( "The execution of memote was interrupted since no installation of " "`git` could be detected. Please install git to use " "this functionality: " "https://git-scm.com/book/en/v2/Getting-Started-Installing-Git") LOGGER.debug("Underlying error:", exc_info=e) sys.exit(1)
def git_installed()
Interrupt execution of memote if `git` has not been installed.
4.083365
3.336132
1.223982
if commit is None: commit = self._repo.head.commit else: commit = self._repo.commit(commit) return GitInfo( hexsha=commit.hexsha, author=commit.author.name, email=commit.author.email, authored_on=commit.authored_datetime )
def record_git_info(self, commit=None)
Record git meta information. Parameters ---------- commit : str, optional Unique hexsha of the desired commit. Returns ------- GitInfo Git commit meta information.
2.391593
2.441962
0.979374
meta["hexsha"] = git_info.hexsha meta["author"] = git_info.author meta["email"] = git_info.email meta["authored_on"] = git_info.authored_on.isoformat(" ")
def add_git(meta, git_info)
Enrich the result meta information with commit data.
2.965714
2.785661
1.064636
git_info = self.record_git_info(commit) self.add_git(result.meta, git_info) filename = self.get_filename(git_info) super(RepoResultManager, self).store( result, filename=filename, **kwargs)
def store(self, result, commit=None, **kwargs)
Store a result in a JSON file attaching git meta information. Parameters ---------- result : memote.MemoteResult The dictionary structure of results. commit : str, optional Unique hexsha of the desired commit. kwargs : Passed to parent function.
5.823769
5.594126
1.041051
git_info = self.record_git_info(commit) LOGGER.debug("Loading the result for commit '%s'.", git_info.hexsha) filename = self.get_filename(git_info) LOGGER.debug("Loading the result '%s'.", filename) result = super(RepoResultManager, self).load(filename) self.add_git(result.meta, git_info) return result
def load(self, commit=None)
Load a result from the storage directory.
4.765604
4.197292
1.1354
# Default value means we do not resolve a model file.
if filename == "default":
    return filename
filename = expanduser(filename)
if isabs(filename):
    return filename
else:
    return join(os.getcwd(), filename)
def normalize(filename)
Return an absolute path of the given file name.
6.615039
6.117683
1.081298
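A behaviour sketch; the paths on the right are illustrative.

.. code-block:: python

    normalize("default")      # -> 'default', the sentinel is not resolved
    normalize("~/model.xml")  # -> '/home/user/model.xml' (expanded home)
    normalize("model.xml")    # -> joined onto os.getcwd(), e.g.
                              #    '/current/dir/model.xml'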
if dtype_conversion is None: dtype_conversion = {"growth": str} super(GrowthExperiment, self).load(dtype_conversion=dtype_conversion) self.data["growth"] = self.data["growth"].isin(self.TRUTHY)
def load(self, dtype_conversion=None)
Load the data table and corresponding validation schema. Parameters ---------- dtype_conversion : dict Column names as keys and corresponding type for loading the data. Please take a look at the `pandas documentation <https://pandas.pydata.org/pandas-docs/stable/io.html#specifying-column-data-types>`__ for detailed explanations.
5.392359
6.789356
0.794237
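A sketch of what the dtype_conversion mapping plausibly feeds into: the ``dtype`` argument of a pandas reader. The file name and TRUTHY set below are assumptions for illustration; the point is that reading "growth" as str preserves markers like "TRUE"/"1" for the isin() conversion above.

import pandas as pd

data = pd.read_csv("growth.csv", dtype={"growth": str})
# Hypothetical truthy markers; the real TRUTHY set is defined elsewhere.
data["growth"] = data["growth"].isin({"true", "True", "TRUE", "yes", "1"})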
with model:
    if self.medium is not None:
        self.medium.apply(model)
    if self.objective is not None:
        model.objective = self.objective
    model.add_cons_vars(self.constraints)
    threshold *= model.slim_optimize()
    growth = list()
    for row in self.data.itertuples(index=False):
        with model:
            exchange = model.reactions.get_by_id(row.exchange)
            if bool(exchange.reactants):
                exchange.lower_bound = -row.uptake
            else:
                exchange.upper_bound = row.uptake
            growth.append(model.slim_optimize() >= threshold)
return DataFrame({
    "exchange": self.data["exchange"],
    "growth": growth
})
def evaluate(self, model, threshold=0.1)
Evaluate in silico growth rates.
4.210703
3.790946
1.110726
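Note that the threshold is relative, not absolute: ``threshold *= model.slim_optimize()`` rescales it by the unconstrained optimum before any exchange is perturbed. With hypothetical numbers:

# With the default threshold=0.1 and an unconstrained optimum of 0.87,
# a condition counts as growth only at >= 10% of the wild-type rate.
wild_type_growth = 0.87            # hypothetical model.slim_optimize()
cutoff = 0.1 * wild_type_growth    # 0.087
print(0.05 >= cutoff)              # False -> scored as "no growth"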
try:
    with BytesIO() as stream:
        with GzipFile(fileobj=stream, mode="wb") as file_handle:
            file_handle.write(
                jsonify(value, pretty=False).encode("utf-8")
            )
        output = stream.getvalue()
    return output
except TypeError as error:
    log_json_incompatible_types(value)
    raise_with_traceback(error)
def process_bind_param(self, value, dialect)
Convert the value to a JSON encoded string before storing it.
4.261343
4.144296
1.028243
if value is not None:
    with BytesIO(value) as stream:
        with GzipFile(fileobj=stream, mode="rb") as file_handle:
            value = json.loads(file_handle.read().decode("utf-8"))
return value
def process_result_value(self, value, dialect)
Convert a JSON encoded string to a dictionary structure.
3.078033
3.0038
1.024713
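The two methods above implement opposite directions of the same scheme: JSON-encode, UTF-8-encode, gzip on the way in; the reverse on the way out. A standalone round trip of that scheme, using only the standard library (the payload is a made-up example):

import json
from gzip import GzipFile
from io import BytesIO

payload = {"tests": {"test_stoichiometric_consistency": True}}

# Compress: JSON -> UTF-8 bytes -> gzip blob.
with BytesIO() as stream:
    with GzipFile(fileobj=stream, mode="wb") as handle:
        handle.write(json.dumps(payload).encode("utf-8"))
    blob = stream.getvalue()

# Decompress: gzip blob -> UTF-8 bytes -> JSON.
with BytesIO(blob) as stream:
    with GzipFile(fileobj=stream, mode="rb") as handle:
        restored = json.loads(handle.read().decode("utf-8"))

assert restored == payload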
min_score = zxcvbn_min_score()
message_title = _('Warning')
message_body = _(
    'This password would take '
    '<em class="password_strength_time"></em> to crack.')
# The HTML template originally assigned to ``strength_markup`` is
# elided in the source.
strength_markup = strength_markup.format(
    title=message_title, body=message_body, min_score=min_score)
try:
    self.attrs['class'] = '%s password_strength'.strip() % self.attrs['class']  # noqa
except KeyError:
    self.attrs['class'] = 'password_strength'
return mark_safe(super(PasswordStrengthInput, self).render(  # nosec
    name, value, attrs) + strength_markup)
def render(self, name, value, attrs=None, **kwargs)
Widget render method.
4.652678
4.617713
1.007572
if self.confirm_with:
    self.attrs['data-confirm-with'] = 'id_%s' % self.confirm_with
# The HTML template string preceding the ``%`` interpolation is
# elided in the source.
confirmation_markup = % (_('Warning'), _("Your passwords don't match."))
try:
    self.attrs['class'] = '%s password_confirmation'.strip() % self.attrs['class']  # noqa
except KeyError:
    self.attrs['class'] = 'password_confirmation'
return mark_safe(super(PasswordConfirmationInput, self).render(  # nosec
    name, value, attrs) + confirmation_markup)
def render(self, name, value, attrs=None, **kwargs)
Widget render method.
4.585577
4.540293
1.009974
user_inputs = []
if user is not None:
    for attribute in self.user_attributes:
        if hasattr(user, attribute):
            user_inputs.append(getattr(user, attribute))
results = zxcvbn(password, user_inputs=user_inputs)
if results.get('score', 0) < self.min_score:
    feedback = ', '.join(
        results.get('feedback', {}).get('suggestions', []))
    raise ValidationError(_(feedback), code=self.code, params={})
def validate(self, password, user=None)
Validate method, run zxcvbn and check score.
3.236324
2.689788
1.203189
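A quick look at the zxcvbn call the validator builds on; the password and user inputs below are made-up examples. The library returns a score from 0 (weak) to 4 (strong) along with human-readable suggestions:

from zxcvbn import zxcvbn

results = zxcvbn("alice2024", user_inputs=["alice", "alice@example.com"])
print(results["score"])                     # e.g. 1
print(results["feedback"]["suggestions"])   # list of improvement hints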
parser = MyHTMLParser()
parser.feed(html)
if parser.is_code:
    return ('code', parser.data.strip())
elif parser.is_math:
    return ('math', parser.data.strip())
else:
    return '', ''
def _get_html_contents(html)
Process an HTML block and detect whether it is a code block, a math block, or a regular HTML block.
3.600249
2.945419
1.222321
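MyHTMLParser itself is not shown in the source; a minimal sketch of a parser exposing the ``is_code`` flag and accumulated ``data`` it appears to rely on, built on the standard library:

from html.parser import HTMLParser

class MyHTMLParser(HTMLParser):
    """Hypothetical reconstruction: flags <code> content while feeding."""

    def __init__(self):
        super().__init__()
        self.is_code = False
        self.is_math = False
        self.data = ''

    def handle_starttag(self, tag, attrs):
        if tag == 'code':
            self.is_code = True

    def handle_data(self, data):
        self.data += data

parser = MyHTMLParser()
parser.feed('<pre><code>print(1)</code></pre>')
print(parser.is_code, parser.data.strip())  # True print(1)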
if isinstance(s, string_types):
    try:
        return op.exists(s)
    except (OSError, ValueError):
        return False
else:
    return False
def _is_path(s)
Return whether an object is a path.
3.516944
3.199952
1.099062
if cls._instance is None:
    # Discover the formats and register them with a new singleton.
    cls._instance = cls().register_entrypoints()
return cls._instance
def format_manager(cls)
Return the instance singleton, creating it if necessary.
12.310637
9.36516
1.314514
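The method above is a lazy class-level singleton: the instance is created on first access and reused afterwards. The same pattern in isolation (a generic sketch, not the ipymd class itself):

class Manager(object):
    _instance = None

    @classmethod
    def instance(cls):
        # Create the singleton on first access, then always reuse it.
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

assert Manager.instance() is Manager.instance()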
for spec in iter_entry_points(self.entry_point_group):
    format_properties = {"name": spec.name}
    try:
        format_properties.update(spec.load())
    except (DistributionNotFound, ImportError) as err:
        self.log.info(
            "ipymd format {} could not be loaded: {}".format(
                spec.name, err))
        continue
    self.register(**format_properties)
return self
def register_entrypoints(self)
Look through the `setuptools` `entry_points` and load all of the formats.
4.450307
3.879689
1.147078
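For reference, enumerating an entry-point group the way register_entrypoints does; the group name 'ipymd.format' is an assumption here, and pkg_resources is the API the code above clearly uses:

from pkg_resources import iter_entry_points

# Each spec carries a name and a loadable object (here: a dict of
# format properties, per the method above).
for spec in iter_entry_points('ipymd.format'):
    print(spec.name, spec.load())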
formats = [name for name, format in self._formats.items()
           if format.get('file_extension', None) == extension]
if len(formats) == 0:
    return None
# More than one format may claim the same extension; the caller then
# has to disambiguate (the original `== 2` missed three or more).
elif len(formats) > 1:
    raise RuntimeError("Several formats are registered with "
                       "that extension; please specify the format "
                       "explicitly.")
else:
    return formats[0]
def format_from_extension(self, extension)
Find a format from its extension.
4.002352
3.747113
1.068116
if name is None:
    name = self.format_from_extension(op.splitext(file)[1])
file_format = self.file_type(name)
if file_format == 'text':
    return _read_text(file)
elif file_format == 'json':
    return _read_json(file)
else:
    load_function = self._formats[name].get('load', None)
    if load_function is None:
        raise IOError("The format must declare a file type or "
                      "load/save functions.")
    return load_function(file)
def load(self, file, name=None)
Load a file. The format name can be specified explicitly or inferred from the file extension.
3.733403
3.516338
1.061731
if name is None:
    name = self.format_from_extension(op.splitext(file)[1])
file_format = self.file_type(name)
if file_format == 'text':
    _write_text(file, contents)
elif file_format == 'json':
    _write_json(file, contents)
else:
    write_function = self._formats[name].get('save', None)
    if write_function is None:
        raise IOError("The format must declare a file type or "
                      "load/save functions.")
    if op.exists(file) and not overwrite:
        print("The file already exists, please use overwrite=True.")
        return
    write_function(file, contents)
def save(self, file, contents, name=None, overwrite=False)
Save contents into a file. The format name can be specified explicitly or inferred from the file extension.
3.60517
3.398582
1.060786
self._check_format(name)
return self._formats[name]['reader'](*args, **kwargs)
def create_reader(self, name, *args, **kwargs)
Create a new reader instance for a given format.
5.228182
3.842536
1.360607
self._check_format(name)
return self._formats[name]['writer'](*args, **kwargs)
def create_writer(self, name, *args, **kwargs)
Create a new writer instance for a given format.
5.919521
4.110335
1.440155
# Load the file if 'contents_or_path' is a path.
if _is_path(contents_or_path):
    contents = self.load(contents_or_path, from_)
else:
    contents = contents_or_path
if from_kwargs is None:
    from_kwargs = {}
if to_kwargs is None:
    to_kwargs = {}
if reader is None:
    reader = (self.create_reader(from_, **from_kwargs)
              if from_ is not None else None)
if writer is None:
    writer = (self.create_writer(to, **to_kwargs)
              if to is not None else None)
if reader is not None:
    # Convert from the source format to ipymd cells.
    cells = [cell for cell in reader.read(contents)]
else:
    # If no reader is specified, 'contents' is assumed to already be
    # a list of ipymd cells.
    cells = contents
notebook_metadata = [cell for cell in cells
                     if cell["cell_type"] == "notebook_metadata"]
if writer is not None:
    if notebook_metadata:
        for cell in notebook_metadata:
            cells.remove(cell)
        notebook_metadata = self.clean_meta(
            notebook_metadata[0]["metadata"]
        )
        if hasattr(writer, "write_notebook_metadata"):
            writer.write_notebook_metadata(notebook_metadata)
        else:
            print("{} does not support notebook metadata, "
                  "dropping metadata: {}".format(
                      writer, notebook_metadata))
    # Convert from ipymd cells to the target format.
    for cell in cells:
        meta = self.clean_cell_meta(cell.get("metadata", {}))
        if not meta:
            cell.pop("metadata", None)
        writer.write(cell)
    return writer.contents
else:
    # If no writer is specified, the output is supposed to be
    # a list of ipymd cells.
    return cells
def convert(self, contents_or_path, from_=None, to=None, reader=None, writer=None, from_kwargs=None, to_kwargs=None, )
Convert contents between supported formats.

Parameters
----------
contents : str
    The contents to convert from.
from_ : str or None
    The name of the source format. If None, this is the
    ipymd_cells format.
to : str or None
    The name of the target format. If None, this is the
    ipymd_cells format.
reader : a Reader instance or None
writer : a Writer instance or None
from_kwargs : dict
    Optional keyword arguments to pass to the reader instance.
to_kwargs : dict
    Optional keyword arguments to pass to the writer instance.
2.61288
2.462319
1.061146
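A hypothetical round trip through the converter above, assuming 'markdown' and 'notebook' formats are registered and 'notes.md' exists; omitting ``to`` yields the intermediate ipymd cell list, omitting ``from_`` consumes one:

# Markdown file -> intermediate ipymd cells.
cells = format_manager().convert('notes.md', from_='markdown')
# Intermediate cells -> notebook contents.
nb = format_manager().convert(cells, to='notebook')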
if not self.verbose_metadata:
    default_kernel_name = (self.default_kernel_name or
                           self._km.kernel_name)
    if (meta.get("kernelspec", {})
            .get("name", None) == default_kernel_name):
        del meta["kernelspec"]
    meta.pop("language_info", None)
return meta
def clean_meta(self, meta)
Removes unwanted metadata.

Parameters
----------
meta : dict
    Notebook metadata.
5.08659
4.68349
1.086068
for k, v in DEFAULT_CELL_METADATA.items():
    if meta.get(k, None) == v:
        meta.pop(k, None)
return meta
def clean_cell_meta(self, meta)
Remove cell metadata that matches the default cell metadata.
3.178318
2.45977
1.29212
if not regex.startswith('^'):
    regex = '^' + regex
reg = re.compile(regex)
return reg.match(line)
def _starts_with_regex(line, regex)
Return whether a line starts with a regex or not.
2.747197
2.386703
1.151043
if prompt is None:
    prompt = 'python'
if prompt == 'python':
    prompt = PythonPromptManager
elif prompt == 'ipython':
    prompt = IPythonPromptManager
# Instantiate the class unless an instance was passed in directly.
if isinstance(prompt, BasePromptManager):
    return prompt
else:
    return prompt()
def create_prompt(prompt)
Create a prompt manager.

Parameters
----------
prompt : str or class deriving from BasePromptManager
    The prompt name ('python' or 'ipython') or a custom
    PromptManager class.
4.438123
3.054996
1.452742
lines = _to_lines(text)
i = 0
for line in lines:
    if _starts_with_regex(line, self.input_prompt_regex):
        i += 1
    else:
        break
return lines[:i], lines[i:]
def split_input_output(self, text)
Split code into input lines and output lines, according to the input and output prompt templates.
3.727147
3.321986
1.121963
path = path.strip('/')
# File extension of the chosen format.
file_extension = format_manager().file_extension(self.format)
if not self.exists(path):
    raise web.HTTPError(404, u'No such file or directory: %s' % path)
os_path = self._get_os_path(path)
if os.path.isdir(os_path):
    if type not in (None, 'directory'):
        raise web.HTTPError(400,
                            u'%s is a directory, not a %s' % (path, type),
                            reason='bad type')
    model = self._dir_model(path, content=content)
elif type == 'notebook' or (type is None and
                            (path.endswith('.ipynb') or
                             path.endswith(file_extension))):  # NEW
    model = self._notebook_model(path, content=content)
else:
    if type == 'directory':
        # The original message lacked its '%' interpolation argument.
        raise web.HTTPError(400,
                            u'%s is not a directory' % path,
                            reason='bad type')
    model = self._file_model(path, content=content, format=format)
return model
def get(self, path, content=True, type=None, format=None)
Takes a path for an entity and returns its model

Parameters
----------
path : str
    the API path that describes the relative path for the target
content : bool
    Whether to include the contents in the reply
type : str, optional
    The requested type - 'file', 'notebook', or 'directory'.
    Will raise HTTPError 400 if the content doesn't match.
format : str, optional
    The requested format for file contents. 'text' or 'base64'.
    Ignored if this returns a notebook or directory model.

Returns
-------
model : dict
    the contents model. If content=True, returns the contents
    of the file or directory as well.
2.916566
2.888348
1.00977
with self.open(os_path, 'r', encoding='utf-8') as f:
    try:
        # NEW
        file_ext = _file_extension(os_path)
        if file_ext == '.ipynb':
            return nbformat.read(f, as_version=as_version)
        else:
            return convert(os_path, from_=self.format, to='notebook')
    except Exception as e:
        raise HTTPError(
            400,
            u"Unreadable Notebook: %s %r" % (os_path, e),
        )
def _read_notebook(self, os_path, as_version=4)
Read a notebook from an os path.
3.833518
3.703941
1.034984
path = path.strip('/')
if 'type' not in model:
    raise web.HTTPError(400, u'No file type provided')
if 'content' not in model and model['type'] != 'directory':
    raise web.HTTPError(400, u'No file content provided')
self.run_pre_save_hook(model=model, path=path)
os_path = self._get_os_path(path)
self.log.debug("Saving %s", os_path)
try:
    if model['type'] == 'notebook':
        # NEW
        file_ext = _file_extension(os_path)
        if file_ext == '.ipynb':
            nb = nbformat.from_dict(model['content'])
            self.check_and_sign(nb, path)
            self._save_notebook(os_path, nb)
        else:
            contents = convert(model['content'],
                               from_='notebook',
                               to=self.format)
            # Save a text file.
            if (format_manager().file_type(self.format) in
                    ('text', 'json')):
                self._save_file(os_path, contents, 'text')
            # Save to a binary file.
            else:
                format_manager().save(os_path, contents,
                                      name=self.format,
                                      overwrite=True)
        # One checkpoint should always exist for notebooks.
        if not self.checkpoints.list_checkpoints(path):
            self.create_checkpoint(path)
    elif model['type'] == 'file':
        # Missing format will be handled internally by _save_file.
        self._save_file(os_path, model['content'], model.get('format'))
    elif model['type'] == 'directory':
        self._save_directory(os_path, model, path)
    else:
        raise web.HTTPError(400,
                            "Unhandled contents type: %s" % model['type'])
except web.HTTPError:
    raise
except Exception as e:
    self.log.error(u'Error while saving file: %s %s', path, e,
                   exc_info=True)
    raise web.HTTPError(500,
                        u'Unexpected error while saving file: %s %s' %
                        (path, e))
validation_message = None
if model['type'] == 'notebook':
    self.validate_notebook_model(model)
    validation_message = model.get('message', None)
model = self.get(path, content=False)
if validation_message:
    model['message'] = validation_message
self.run_post_save_hook(model=model, os_path=os_path)
return model
def save(self, model, path='')
Save the file model and return the model with no content.
2.902827
2.850232
1.018453
python = _preprocess(python)
if not python:
    return []
lexer = PythonSplitLexer()
lexer.read(python)
return lexer.chunks
def _split_python(python)
Split Python source into chunks.

Chunks are separated by at least two return lines. The break must
not be followed by a space. Also, long Python strings spanning several
lines are not split.
9.342575
8.831892
1.057823
lines = source.splitlines()
if all(line.startswith('# ') for line in lines):
    # The chunk is Markdown *unless* it is commented Python code.
    source = '\n'.join(line[2:] for line in lines
                       if not line[2:].startswith('#'))  # skip headers
    if not source:
        return True
    # Try to parse the chunk: if it fails, it is Markdown, otherwise,
    # it is Python.
    return not _is_python(source)
return False
def _is_chunk_markdown(source)
Return whether a chunk contains Markdown contents.
5.51357
5.24234
1.051738
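The helper _is_python is not shown in the source; a plausible sketch is a plain syntax check via the standard ast module, which matches the "try to parse the chunk" comment above:

import ast

def _is_python(source):
    # Hypothetical reconstruction: valid syntax means Python code.
    try:
        ast.parse(source)
        return True
    except SyntaxError:
        return False

print(_is_python("x = 1"))         # True
print(_is_python("just *prose*"))  # False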
source = '\n'.join('# ' + line.rstrip()
                   for line in source.splitlines())
return source
def _add_hash(source)
Add a leading hash '#' at the beginning of every line in the source.
5.83799
4.050022
1.441471
lines = source.splitlines()
# Filters is a list of 'hN' strings where 1 <= N <= 6.
headers = [_replace_header_filter(filter) for filter in filters]
lines = [line for line in lines if line.startswith(tuple(headers))]
return '\n'.join(lines)
def _filter_markdown(source, filters)
Only keep some Markdown headers from a Markdown string.
5.50633
4.545733
1.211318
level = 1 if m.group(2) == '=' else 2
self.renderer.heading(m.group(1), level=level)
def parse_lheading(self, m)
Parse setext heading.
3.8158
3.395778
1.12369
assert n >= 0
text = self._output.getvalue().rstrip('\n')
if not text:
    return
self._output = StringIO()
self._output.write(text)
self._output.write('\n' * n)
text = self._output.getvalue()
assert text[-n-1] != '\n'
assert text[-n:] == '\n' * n
def ensure_newline(self, n)
Make sure there are 'n' line breaks at the end.
2.914395
2.768422
1.052728
body = m.group('body')
is_notebook = m.group('sep_close') == '---'
if is_notebook:
    # make it into a valid YAML object by stripping ---
    body = body.strip()[:-3] + '...'
try:
    if body:
        # Parse the rewritten body, not the raw match, so the notebook
        # delimiter fix above actually takes effect.
        return self._meta(yaml.safe_load(body), is_notebook)
    else:
        return self._meta({'ipymd': {'empty_meta': True}}, is_notebook)
except Exception as err:
    raise Exception(body, err)
def _meta_from_regex(self, m)
Extract and parse YAML metadata from a meta match

Notebook metadata must appear at the beginning of the file and
follows the Jekyll front-matter convention of dashed delimiters:

    ---
    some: yaml
    ---

Cell metadata follows the YAML spec of dashes and periods

    ---
    some: yaml
    ...

Both must be followed by at least one blank line (\n\n).
6.308275
6.109488
1.032538
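Parsing Jekyll-style front matter by hand, mirroring the '---' handling described above; the document string is a made-up example and PyYAML is assumed, as in the method itself:

import yaml

document = "---\ntitle: Demo\nkernelspec:\n  name: python3\n---\n\n# Body"
# The YAML block sits between the first two '---' delimiters.
body = document.split('---', 2)[1]
meta = yaml.safe_load(body)
print(meta)  # {'title': 'Demo', 'kernelspec': {'name': 'python3'}}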
input, output = self._prompt.to_cell(source)
return {'cell_type': 'code',
        'input': input,
        'output': output}
def _code_cell(self, source)
Split the source into input and output.
6.353022
4.734504
1.341856
text = re.sub(r'\r\n|\r', '\n', text)
text = text.replace('\t', ' ' * tab)
text = text.replace('\u00a0', ' ')
text = text.replace('\u2424', '\n')
pattern = re.compile(r'^ +$', re.M)
text = pattern.sub('', text)
text = _rstrip_lines(text)
return text
def _preprocess(text, tab=4)
Normalize a text.
2.429376
2.388669
1.017042
diff = difflib.ndiff(text_0.splitlines(), text_1.splitlines())
return _diff_removed_lines(diff)
def _diff(text_0, text_1)
Return a diff between two strings.
3.478276
3.232033
1.076188
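ndiff marks lines present only in the first input with a '- ' prefix; _diff_removed_lines presumably filters on that prefix. A self-contained version of the whole idea:

import difflib

def removed_lines(text_0, text_1):
    # Keep only the payload of lines removed between the two texts.
    diff = difflib.ndiff(text_0.splitlines(), text_1.splitlines())
    return [line[2:] for line in diff if line.startswith('- ')]

print(removed_lines("a\nb\nc", "a\nc"))  # ['b']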
with open(file, 'w') as f:
    return json.dump(contents, f, indent=2, sort_keys=True)
def _write_json(file, contents)
Write a dict to a JSON file.
2.325594
2.2584
1.029753
style = ListStyle(name='_numbered_list')
lls = ListLevelStyleNumber(level=1)
lls.setAttribute('displaylevels', 1)
lls.setAttribute('numsuffix', '. ')
lls.setAttribute('numformat', '1')
llp = ListLevelProperties()
llp.setAttribute('listlevelpositionandspacemode', 'label-alignment')
llla = ListLevelLabelAlignment(labelfollowedby='listtab')
llla.setAttribute('listtabstopposition', '1.27cm')
llla.setAttribute('textindent', '-0.635cm')
llla.setAttribute('marginleft', '1.27cm')
llp.addElement(llla)
# llp.setAttribute('spacebefore', '')
# llp.setAttribute('minlabelwidth', '')
lls.addElement(llp)
style.addElement(lls)
return style
def _numbered_style()
Create a numbered list style.
5.391142
5.143142
1.04822
if family == 'paragraph' and 'marginbottom' not in kwargs:
    kwargs['marginbottom'] = '.5cm'
style = Style(name=name, family=family)
# Extract paragraph properties.
kwargs_par = {}
keys = sorted(kwargs.keys())
for k in keys:
    if 'margin' in k:
        kwargs_par[k] = kwargs.pop(k)
style.addElement(TextProperties(**kwargs))
if kwargs_par:
    style.addElement(ParagraphProperties(**kwargs_par))
return style
def _create_style(name, family=None, **kwargs)
Helper function for creating a new style.
3.617127
3.569848
1.013244
styles = {}

def _add_style(name, **kwargs):
    styles[name] = _create_style(name, **kwargs)

_add_style('heading-1', family='paragraph', fontsize='24pt',
           fontweight='bold')
_add_style('heading-2', family='paragraph', fontsize='22pt',
           fontweight='bold')
_add_style('heading-3', family='paragraph', fontsize='20pt',
           fontweight='bold')
_add_style('heading-4', family='paragraph', fontsize='18pt',
           fontweight='bold')
_add_style('heading-5', family='paragraph', fontsize='16pt',
           fontweight='bold')
_add_style('heading-6', family='paragraph', fontsize='14pt',
           fontweight='bold')
_add_style('normal-paragraph', family='paragraph', fontsize='12pt',
           marginbottom='0.25cm')
_add_style('code', family='paragraph', fontsize='10pt',
           fontweight='bold', fontfamily='Courier New', color='#555555')
_add_style('quote', family='paragraph', fontsize='12pt',
           fontstyle='italic')
_add_style('list-paragraph', family='paragraph', fontsize='12pt',
           marginbottom='.1cm')
_add_style('sublist-paragraph', family='paragraph', fontsize='12pt',
           marginbottom='.1cm')
_add_style('numbered-list-paragraph', family='paragraph',
           fontsize='12pt', marginbottom='.1cm')
_add_style('normal-text', family='text', fontsize='12pt')
_add_style('italic', family='text', fontstyle='italic',
           fontsize='12pt')
_add_style('bold', family='text', fontweight='bold', fontsize='12pt')
_add_style('url', family='text', fontsize='12pt', fontweight='bold',
           fontfamily='Courier')
_add_style('inline-code', family='text', fontsize='10pt',
           fontweight='bold', fontfamily='Courier New', color='#555555')
styles['_numbered_list'] = _numbered_style()
return styles
def default_styles()
Generate default ODF styles.
1.655593
1.612583
1.026671