Dataset fields (with observed value ranges):

- repository_name: string, 7 to 55 characters
- func_path_in_repository: string, 4 to 223 characters
- func_name: string, 1 to 134 characters
- whole_func_string: string, 75 to 104k characters
- language: string, 1 class
- func_code_string: string, 75 to 104k characters
- func_code_tokens: sequence, 19 to 28.4k tokens
- func_documentation_string: string, 1 to 46.9k characters
- func_documentation_tokens: sequence, 1 to 1.97k tokens
- split_name: string, 1 class
- func_code_url: string, 87 to 315 characters
molmod/molmod
molmod/graphs.py
Graph.full_match
def full_match(self, other):
    """Find the mapping between vertex indexes in self and other.

    This also works on disconnected graphs. Derived classes should just
    implement get_vertex_string and get_edge_string to make this method
    aware of the different nature of certain vertices. In case of
    molecules, this would make the algorithm sensitive to atom numbers
    etc.
    """
    # we need to normalize the subgraphs because they are used as patterns.
    graphs0 = [
        self.get_subgraph(group, normalize=True)
        for group in self.independent_vertices
    ]
    graphs1 = [
        other.get_subgraph(group)
        for group in other.independent_vertices
    ]
    if len(graphs0) != len(graphs1):
        return
    matches = []
    for graph0 in graphs0:
        pattern = EqualPattern(graph0)
        found_match = False
        for i, graph1 in enumerate(graphs1):
            local_matches = list(GraphSearch(pattern)(graph1, one_match=True))
            if len(local_matches) == 1:
                match = local_matches[0]
                # we need to restore the relation between the normalized
                # graph0 and its original indexes
                old_to_new = OneToOne((
                    (j, i) for i, j in enumerate(graph0._old_vertex_indexes)
                ))
                matches.append(match * old_to_new)
                del graphs1[i]
                found_match = True
                break
        if not found_match:
            return
    result = OneToOne()
    for match in matches:
        result.add_relations(match.forward.items())
    return result
python
train
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L782-L828
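A minimal usage sketch for full_match (assuming molmod is importable and that Graph accepts a list of edge pairs; the vertex labels are made up): it returns a OneToOne bijection on success and None when no full mapping exists.

from molmod.graphs import Graph

# two triangles whose edges are listed in a different order
g0 = Graph([(0, 1), (1, 2), (2, 0)])
g1 = Graph([(2, 0), (0, 1), (1, 2)])
mapping = g0.full_match(g1)
if mapping is None:
    print("no full match")
else:
    print(mapping.forward)  # vertex index in g0 -> vertex index in g1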
molmod/molmod
molmod/graphs.py
OneToOne.add_relation
def add_relation(self, source, destination):
    """Add a new relation to the bijection"""
    if self.in_sources(source):
        if self.forward[source] != destination:
            raise ValueError("Source is already in use. Destination does "
                             "not match.")
        else:
            raise ValueError("Source-Destination relation already exists.")
    elif self.in_destinations(destination):
        raise ValueError("Destination is already in use. Source does not "
                         "match.")
    else:
        self.forward[source] = destination
        self.reverse[destination] = source
python
train
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L869-L882
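A short sketch of the error contract; OneToOne is constructed from an iterable of pairs here, the same way full_match above builds old_to_new.

from molmod.graphs import OneToOne

bijection = OneToOne([(0, 5)])
bijection.add_relation(1, 6)      # fresh source and destination: accepted
try:
    bijection.add_relation(0, 7)  # source 0 is already mapped to 5
except ValueError as error:
    print(error)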
molmod/molmod
molmod/graphs.py
OneToOne.add_relations
def add_relations(self, relations):
    """Add multiple relations to a bijection"""
    for source, destination in relations:
        self.add_relation(source, destination)
python
train
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L884-L887
molmod/molmod
molmod/graphs.py
OneToOne.inverse
def inverse(self):
    """Returns the inverse bijection."""
    result = self.__class__()
    result.forward = copy.copy(self.reverse)
    result.reverse = copy.copy(self.forward)
    return result
python
train
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L905-L910
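A minimal check of the forward/reverse swap performed by inverse (same assumed OneToOne construction as above):

from molmod.graphs import OneToOne

bijection = OneToOne([(0, 5), (1, 6)])
inverted = bijection.inverse()
assert inverted.forward == bijection.reverse
assert inverted.reverse == bijection.forward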
molmod/molmod
molmod/graphs.py
Match.from_first_relation
def from_first_relation(cls, vertex0, vertex1):
    """Initialize a fresh match based on the first relation"""
    result = cls([(vertex0, vertex1)])
    result.previous_ends1 = set([vertex1])
    return result
python
train
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L916-L920
molmod/molmod
molmod/graphs.py
Match.get_new_edges
def get_new_edges(self, subject_graph):
    """Get new edges from the subject graph for the graph search algorithm

    The graph search algorithm extends the matches iteratively by adding
    matching vertices that are one edge further from the starting vertex
    at each iteration.
    """
    result = []
    #print "Match.get_new_edges self.previous_ends1", self.previous_ends1
    for vertex in self.previous_ends1:
        for neighbor in subject_graph.neighbors[vertex]:
            if neighbor not in self.reverse:
                result.append((vertex, neighbor))
    return result
python
train
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L922-L935
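The frontier logic can be replayed without the Match machinery; this stand-alone sketch mirrors the loop above with plain dictionaries (all values are invented for the example):

neighbors = {0: {1, 2}, 1: {0, 3}, 2: {0}, 3: {1}}  # subject graph
previous_ends1 = {1}     # subject vertices matched in the previous iteration
reverse = {0: 0, 1: 1}   # subject vertex -> pattern vertex, matched so far
new_edges = [(vertex, neighbor)
             for vertex in previous_ends1
             for neighbor in neighbors[vertex]
             if neighbor not in reverse]
print(new_edges)         # [(1, 3)]: only not-yet-matched neighbors qualify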
molmod/molmod
molmod/graphs.py
Match.copy_with_new_relations
def copy_with_new_relations(self, new_relations):
    """Create a new match object extended with new relations"""
    result = self.__class__(self.forward.items())
    result.add_relations(new_relations.items())
    result.previous_ends1 = set(new_relations.values())
    return result
python
train
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L937-L942
molmod/molmod
molmod/graphs.py
CustomPattern._set_pattern_graph
def _set_pattern_graph(self, pattern_graph):
    """Initialize the pattern_graph"""
    self.pattern_graph = pattern_graph
    self.level_edges = {}
    self.level_constraints = {}
    self.duplicate_checks = set([])
    if pattern_graph is None:
        return
    if len(pattern_graph.independent_vertices) != 1:
        raise ValueError("A pattern_graph must not be a disconnected "
                         "graph.")
    # A) the levels for the incremental pattern matching
    ibfe = self.pattern_graph.iter_breadth_first_edges(self.start_vertex)
    for edge, distance, constraint in ibfe:
        if constraint:
            l = self.level_constraints.setdefault(distance-1, [])
        else:
            l = self.level_edges.setdefault(distance, [])
        l.append(edge)
    #print "level_edges", self.level_edges
    #print "level_constraints", self.level_constraints
    # B) The comparisons that should be checked when one wants to avoid
    # symmetrically duplicate pattern matches
    if self.criteria_sets is not None:
        for cycles in pattern_graph.symmetry_cycles:
            if len(cycles) > 0:
                self.duplicate_checks.add((cycles[0][0], cycles[0][1]))
python
train
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L1212-L1238
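The level bookkeeping can be inspected through the breadth-first iterator this method consumes; a sketch assuming iter_breadth_first_edges yields (edge, distance, constraint) triples, exactly as the loop above unpacks them:

from molmod.graphs import Graph

square = Graph([(0, 1), (1, 2), (2, 3), (3, 0)])
for edge, distance, constraint in square.iter_breadth_first_edges(0):
    # tree edges land in level_edges[distance]; ring-closing edges
    # (constraint True) land in level_constraints[distance-1]
    print(edge, distance, constraint)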
molmod/molmod
molmod/graphs.py
CustomPattern.iter_initial_relations
def iter_initial_relations(self, subject_graph):
    """Iterate over all valid initial relations for a match"""
    vertex0 = self.start_vertex
    for vertex1 in range(subject_graph.num_vertices):
        if self.compare(vertex0, vertex1, subject_graph):
            yield vertex0, vertex1
python
train
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L1241-L1246
molmod/molmod
molmod/graphs.py
CustomPattern.get_new_edges
def get_new_edges(self, level):
    """Get new edges from the pattern graph for the graph search algorithm

    The level argument denotes the distance of the new edges from the
    starting vertex in the pattern graph.
    """
    return (
        self.level_edges.get(level, []),
        self.level_constraints.get(level, [])
    )
python
train
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L1248-L1257
molmod/molmod
molmod/graphs.py
CustomPattern.check_next_match
def check_next_match(self, match, new_relations, subject_graph, one_match):
    """Check if the (onset for a) match can be valid"""
    # only returns True for exactly one set of new_relations from all the
    # ones that are symmetrically equivalent
    if not (self.criteria_sets is None or one_match):
        for check in self.duplicate_checks:
            vertex_a = new_relations.get(check[0])
            vertex_b = new_relations.get(check[1])
            if vertex_a is None and vertex_b is None:
                # if this pair is completely absent in the new relations,
                # it is either completely in the match or it is to be
                # matched. So it is either already checked for symmetry
                # duplicates, or it will be checked in the future.
                continue
            if vertex_a is None:
                # maybe vertex_a is in the match and vertex_b is the only
                # one in the new relations. try to get vertex_a from the
                # match.
                vertex_a = match.forward.get(check[0])
                if vertex_a is None:
                    # ok, vertex_a is to be found, don't care about it right
                    # now. it will be checked in future calls.
                    continue
            elif vertex_b is None:
                # maybe vertex_b is in the match and vertex_a is the only
                # one in the new relations. try to get vertex_b from the
                # match.
                vertex_b = match.forward.get(check[1])
                if vertex_b is None:
                    # ok, vertex_b is to be found, don't care about it right
                    # now. it will be checked in future calls.
                    continue
            if vertex_a > vertex_b:
                # Why does this work? The answer is not so easy to explain,
                # and certainly not easy to find. If vertex_a > vertex_b, it
                # means that there is a symmetry operation that leads to
                # an equivalent match where vertex_b < vertex_a. The latter
                # match is preferred for as many pairs (vertex_a, vertex_b)
                # as possible without rejecting all possible matches. The
                # real difficulty is to construct a proper list of
                # (vertex_a, vertex_b) pairs that will reject all but one
                # match. I conjecture that this list contains the first two
                # vertices from each normalized symmetry cycle of the
                # pattern graph. We need a math guy to do the proof. -- Toon
                return False
        return True
    return True
python
train
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L1259-L1304
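The ordering trick in the duplicate_checks loop condenses to a few lines; the numbers below are invented to show one rejected case:

new_relations = {0: 7, 1: 4}  # pattern vertex -> subject vertex
check = (0, 1)                # first two vertices of a symmetry cycle
vertex_a = new_relations.get(check[0])
vertex_b = new_relations.get(check[1])
# 7 > 4, so this candidate is rejected; the symmetry-equivalent match
# that maps (0, 1) to (4, 7) survives as the canonical representative.
print(vertex_a > vertex_b)    # True -> check_next_match returns False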
molmod/molmod
molmod/graphs.py
CustomPattern.iter_final_matches
def iter_final_matches(self, canonical_match, subject_graph, one_match):
    """Given a match, iterate over all related equivalent matches

    When criteria sets are defined, the iterator runs over all
    symmetrically equivalent matches that fulfill one of the criteria
    sets. When no criteria sets are defined, the iterator only yields
    the input match.
    """
    if self.criteria_sets is None or one_match:
        yield canonical_match
    else:
        for criteria_set in self.criteria_sets:
            satisfied_match_tags = set([])
            for symmetry in self.pattern_graph.symmetries:
                final_match = canonical_match * symmetry
                #print final_match
                if criteria_set.test_match(final_match, self.pattern_graph, subject_graph):
                    match_tags = tuple(
                        self.vertex_tags.get(symmetry.reverse[vertex0])
                        for vertex0
                        in range(self.pattern_graph.num_vertices)
                    )
                    if match_tags not in satisfied_match_tags:
                        final_match.__dict__.update(criteria_set.info)
                        yield final_match
                        satisfied_match_tags.add(match_tags)
python
train
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L1310-L1334
molmod/molmod
molmod/graphs.py
EqualMatch.get_closed_cycles
def get_closed_cycles(self):
    """Return the closed cycles corresponding to this permutation

    The cycles will be normalized to facilitate the elimination of
    duplicates. The following is guaranteed:

    1) If this permutation is represented by disconnected cycles, the
       cycles will be sorted by the lowest index they contain.
    2) Each cycle starts with its lowest index. (unique starting point)
    3) Singletons are discarded. (because they are boring)
    """
    # A) construct all the cycles
    closed_cycles = []
    todo = set(self.forward.keys())
    if todo != set(self.forward.values()):
        raise GraphError("The subject and pattern graph must have the same "
                         "numbering.")
    current_vertex = None
    while len(todo) > 0:
        if current_vertex is None:
            current_vertex = todo.pop()
            current_cycle = []
        else:
            todo.discard(current_vertex)
        current_cycle.append(current_vertex)
        next_vertex = self.get_destination(current_vertex)
        if next_vertex == current_cycle[0]:
            if len(current_cycle) > 1:
                # bring the lowest element in front
                pivot = np.argmin(current_cycle)
                current_cycle = current_cycle[pivot:] + \
                                current_cycle[:pivot]
                closed_cycles.append(current_cycle)
            current_vertex = None
        else:
            current_vertex = next_vertex
    # B) normalize the cycle representation
    closed_cycles.sort()  # a normal sort is sufficient because only the
                          # first item of each cycle is considered
    # transform the structure into a tuple of tuples
    closed_cycles = tuple(tuple(cycle) for cycle in closed_cycles)
    return closed_cycles
python
train
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L1339-L1381
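A compact re-implementation of the normalization rules, for illustration only, applied to a permutation containing one singleton:

def closed_cycles(perm):
    # lowest index first in each cycle, cycles sorted, singletons dropped
    todo, cycles = set(perm), []
    while todo:
        start = todo.pop()
        cycle, vertex = [start], perm[start]
        while vertex != start:
            todo.discard(vertex)
            cycle.append(vertex)
            vertex = perm[vertex]
        if len(cycle) > 1:
            pivot = cycle.index(min(cycle))
            cycles.append(tuple(cycle[pivot:] + cycle[:pivot]))
    return tuple(sorted(cycles))

print(closed_cycles({0: 1, 1: 0, 2: 2, 3: 4, 4: 3}))  # ((0, 1), (3, 4))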
molmod/molmod
molmod/graphs.py
EqualPattern.iter_initial_relations
def iter_initial_relations(self, subject_graph):
    """Iterate over all valid initial relations for a match"""
    if self.pattern_graph.num_edges != subject_graph.num_edges:
        return  # don't even try
    for pair in CustomPattern.iter_initial_relations(self, subject_graph):
        yield pair
python
train
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L1395-L1400
molmod/molmod
molmod/graphs.py
EqualPattern.compare
def compare(self, vertex0, vertex1, subject_graph):
    """Returns True when the two vertices are of the same kind"""
    return (
        self.pattern_graph.vertex_fingerprints[vertex0] ==
        subject_graph.vertex_fingerprints[vertex1]
    ).all()
python
train
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L1402-L1407
molmod/molmod
molmod/graphs.py
RingPattern.iter_initial_relations
def iter_initial_relations(self, subject_graph):
    """Iterate over all valid initial relations for a match"""
    vertex0 = 0
    for vertex1 in range(subject_graph.num_vertices):
        yield vertex0, vertex1
python
train
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L1423-L1427
molmod/molmod
molmod/graphs.py
RingPattern.get_new_edges
def get_new_edges(self, level):
    """Get new edges from the pattern graph for the graph search algorithm

    The level argument denotes the distance of the new edges from the
    starting vertex in the pattern graph.
    """
    if level == 0:
        edges0 = [(0, 1), (0, 2)]
    elif level >= (self.max_size-1)//2:
        edges0 = []
    else:
        l2 = level*2
        edges0 = [(l2-1, l2+1), (l2, l2+2)]
    return edges0, []
python
train
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L1429-L1442
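A worked trace of the level schedule, assuming a max_size of 6: the ring pattern grows two strands away from vertex 0 until the level bound is reached, after which complete (below) tries to close them.

max_size = 6
for level in range(3):
    if level == 0:
        edges0 = [(0, 1), (0, 2)]
    elif level >= (max_size - 1) // 2:
        edges0 = []
    else:
        l2 = level * 2
        edges0 = [(l2 - 1, l2 + 1), (l2, l2 + 2)]
    print(level, edges0)  # 0: [(0, 1), (0, 2)]; 1: [(1, 3), (2, 4)]; 2: []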
molmod/molmod
molmod/graphs.py
RingPattern.check_next_match
def check_next_match(self, match, new_relations, subject_graph, one_match):
    """Check if the (onset for a) match can be a valid (part of a) ring"""
    # avoid duplicate rings (order of traversal)
    if len(match) == 3:
        if match.forward[1] < match.forward[2]:
            #print "RingPattern.check_next_match: duplicate order", match.forward[1], match.forward[2]
            return False
    # avoid duplicate rings (starting point)
    for vertex1 in new_relations.values():
        if vertex1 < match.forward[0]:
            #print "RingPattern.check_next_match: duplicate start", vertex1, match.forward[0]
            return False
    # can this ever become a strong ring?
    for vertex1 in new_relations.values():
        paths = list(subject_graph.iter_shortest_paths(vertex1, match.forward[0]))
        if len(paths) != 1:
            #print "RingPattern.check_next_match: not strong 1"
            return False
        if len(paths[0]) != (len(match)+1)//2:
            #print "RingPattern.check_next_match: not strong 2"
            return False
    return True
python
train
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L1444-L1465
molmod/molmod
molmod/graphs.py
RingPattern.complete
def complete(self, match, subject_graph):
    """Check the completeness of a ring match"""
    size = len(match)
    # check whether we have an odd strong ring
    if match.forward[size-1] in subject_graph.neighbors[match.forward[size-2]]:
        # we have an odd closed cycle. check if this is a strong ring
        order = list(range(0, size, 2)) + list(range(1, size-1, 2))[::-1]
        ok = True
        for i in range(len(order)//2):
            # Count the number of paths between two opposite points in the
            # ring. Since the ring has an odd number of vertices, each
            # vertex has two semi-opposite vertices.
            count = len(list(subject_graph.iter_shortest_paths(
                match.forward[order[i]],
                match.forward[order[(i+size//2)%size]]
            )))
            if count > 1:
                ok = False
                break
            count = len(list(subject_graph.iter_shortest_paths(
                match.forward[order[i]],
                match.forward[order[(i+size//2+1)%size]]
            )))
            if count > 1:
                ok = False
                break
        if ok:
            match.ring_vertices = tuple(match.forward[i] for i in order)
            #print "RingPattern.complete: found odd ring"
            return True
        #print "RingPattern.complete: no odd ring"
    # check whether we have an even strong ring
    paths = list(subject_graph.iter_shortest_paths(
        match.forward[size-1],
        match.forward[size-2]
    ))
    #print "RingPattern.complete: even paths", paths
    if (size > 3 and len(paths) == 1 and len(paths[0]) == 3) or \
       (size == 3 and len(paths) == 2 and len(paths[0]) == 3):
        path = paths[0]
        if size == 3 and path[1] == match.forward[0]:
            path = paths[1]
        # we have an even closed cycle. check if this is a strong ring
        match.add_relation(size, path[1])
        size += 1
        order = list(range(0, size, 2)) + list(range(size-1, 0, -2))
        ok = True
        for i in range(len(order)//2):
            count = len(list(subject_graph.iter_shortest_paths(
                match.forward[order[i]],
                match.forward[order[(i+size//2)%size]]
            )))
            if count != 2:
                ok = False
                break
        if ok:
            # also check if this does not violate the requirement for a
            # unique origin:
            if match.forward[size-1] < match.forward[0]:
                ok = False
        if not ok:
            vertex1 = match.forward[size-1]
            del match.forward[size-1]
            del match.reverse[vertex1]
            size -= 1
            #print "RingPattern.complete: no even ring"
        else:
            match.ring_vertices = tuple(match.forward[i] for i in order)
            #print "RingPattern.complete: found even ring"
        return ok
    #print "RingPattern.complete: not at all"
    return False
python
train
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L1467-L1538
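An end-to-end sketch tying the ring machinery together; it assumes RingPattern takes a max_size argument (the attribute used by get_new_edges above) and that GraphSearch is invoked as in full_match:

from molmod.graphs import Graph, GraphSearch, RingPattern

# a cube graph: each face is a ring of size four
cube = Graph([(0, 1), (1, 2), (2, 3), (3, 0),
              (4, 5), (5, 6), (6, 7), (7, 4),
              (0, 4), (1, 5), (2, 6), (3, 7)])
for match in GraphSearch(RingPattern(max_size=6))(cube):
    print(match.ring_vertices)  # set by complete() once a ring is confirmed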
molmod/molmod
molmod/graphs.py
GraphSearch.print_debug
def print_debug(self, text, indent=0):
    """Only prints debug info on screen when self.debug == True."""
    if self.debug:
        if indent > 0:
            print(" "*self.debug, text)
        self.debug += indent
        if indent <= 0:
            print(" "*self.debug, text)
python
train
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L1588-L1595
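A stand-alone trace of the indentation contract (positive indent prints before the depth grows, zero or negative prints after it shrinks); self.debug is replaced by a module-level counter for the sketch:

debug = 1  # plays the role of self.debug

def print_debug(text, indent=0):
    global debug
    if debug:
        if indent > 0:
            print(" " * debug, text)
        debug += indent
        if indent <= 0:
            print(" " * debug, text)

print_debug("enter iteration", 1)   # printed at the old depth
print_debug("matched a vertex")     # printed one level deeper
print_debug("leave iteration", -1)  # printed at the restored depth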
molmod/molmod
molmod/graphs.py
GraphSearch._iter_candidate_groups
def _iter_candidate_groups(self, init_match, edges0, edges1):
    """Divide the edges into groups"""
    # collect all end vertices0 and end vertices1 that belong to the same
    # group.
    sources = {}
    for start_vertex0, end_vertex0 in edges0:
        l = sources.setdefault(start_vertex0, [])
        l.append(end_vertex0)
    dests = {}
    for start_vertex1, end_vertex1 in edges1:
        start_vertex0 = init_match.reverse[start_vertex1]
        l = dests.setdefault(start_vertex0, [])
        l.append(end_vertex1)
    for start_vertex0, end_vertices0 in sources.items():
        end_vertices1 = dests.get(start_vertex0, [])
        yield end_vertices0, end_vertices1
python
train
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L1597-L1612
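The grouping can be replayed with plain data; every index below is invented, with pattern vertices 0 and 1 assumed to be matched to subject vertices 10 and 11:

init_reverse = {10: 0, 11: 1}            # subject vertex -> pattern vertex
edges0 = [(0, 2), (0, 3), (1, 4)]        # new pattern edges
edges1 = [(10, 20), (10, 21), (11, 22)]  # new subject edges

sources, dests = {}, {}
for start0, end0 in edges0:
    sources.setdefault(start0, []).append(end0)
for start1, end1 in edges1:
    dests.setdefault(init_reverse[start1], []).append(end1)
for start0, ends0 in sources.items():
    print(ends0, dests.get(start0, []))  # [2, 3] [20, 21], then [4] [22]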
molmod/molmod
molmod/graphs.py
GraphSearch._iter_new_relations
def _iter_new_relations(self, init_match, subject_graph, edges0,
                        constraints0, edges1):
    """Given an onset for a match, iterate over all possible new key-value pairs"""
    # Count the number of unique edges0[i][1] values. This is also
    # the number of new relations.
    num_new_relations = len(set(j for i, j in edges0))

    def combine_small(relations, num):
        """iterate over all compatible combinations within one set of relations"""
        if len(relations) == 0:
            return
        for i, pivot in enumerate(relations):
            if num == 1:
                yield (pivot, )
            else:
                compatible_relations = list(
                    item for item in relations[:i]
                    if pivot[0] != item[0] and pivot[1] != item[1]
                )
                for tail in combine_small(compatible_relations, num-1):
                    yield (pivot, ) + tail

    # generate candidate relations
    candidate_relations = []
    icg = self._iter_candidate_groups(init_match, edges0, edges1)
    for end_vertices0, end_vertices1 in icg:
        if len(end_vertices0) > len(end_vertices1):
            return  # this can never work, the subject graph is 'too small'
        elif not self.pattern.sub and \
             len(end_vertices0) != len(end_vertices1):
            return  # an exact match is sought, this can never work
        l = []
        for end_vertex0 in end_vertices0:
            for end_vertex1 in end_vertices1:
                if self.pattern.compare(end_vertex0, end_vertex1, subject_graph):
                    l.append((end_vertex0, end_vertex1))
        # len(end_vertices0) = the total number of relations that must be
        # made in this group
        if len(l) > 0:
            # turn l into a list of sets of internally compatible candidate
            # relations in this group
            l = list(combine_small(l, len(end_vertices0)))
            candidate_relations.append(l)
    if len(candidate_relations) == 0:
        return
    self.print_debug("candidate_relations: %s" % candidate_relations)

    def combine_big(pos=0):
        """Iterate over all possible sets of relations"""
        # pos is an index in candidate_relations
        crs = candidate_relations[pos]
        if pos == len(candidate_relations)-1:
            for relations in crs:
                yield relations
        else:
            for tail in combine_big(pos+1):
                for relations in crs:
                    yield relations + tail

    # final loop
    for new_relations in combine_big():
        new_relations = set(new_relations)
        self.print_debug("new_relations: %s" % (new_relations, ))
        # check the total number of new relations
        if len(new_relations) != num_new_relations:
            continue
        # check sanity of relations
        forward = dict(new_relations)
        if len(forward) != num_new_relations:
            continue
        reverse = dict((j, i) for i, j in new_relations)
        if len(reverse) != num_new_relations:
            continue
        # check the constraints
        for a0, b0 in constraints0:
            if forward[a0] not in subject_graph.neighbors[forward[b0]]:
                forward = None
                break
        if forward is None:
            continue
        yield forward
python
def _iter_new_relations(self, init_match, subject_graph, edges0, constraints0, edges1): """Given an onset for a match, iterate over all possible new key-value pairs""" # Count the number of unique edges0[i][1] values. This is also # the number of new relations. num_new_relations = len(set(j for i, j in edges0)) def combine_small(relations, num): """iterate over all compatible combinations within one set of relations""" if len(relations) == 0: return for i, pivot in enumerate(relations): if num == 1: yield (pivot, ) else: compatible_relations = list( item for item in relations[:i] if pivot[0]!=item[0] and pivot[1]!=item[1] ) for tail in combine_small(compatible_relations, num-1): yield (pivot, ) + tail # generate candidate relations candidate_relations = [] icg = self._iter_candidate_groups(init_match, edges0, edges1) for end_vertices0, end_vertices1 in icg: if len(end_vertices0) > len(end_vertices1): return # this can never work, the subject graph is 'too small' elif not self.pattern.sub and \ len(end_vertices0) != len(end_vertices1): return # an exact match is sought, this can never work l = [] for end_vertex0 in end_vertices0: for end_vertex1 in end_vertices1: if self.pattern.compare(end_vertex0, end_vertex1, subject_graph): l.append((end_vertex0, end_vertex1)) # len(end_vertices0) = the total number of relations that must be # made in this group if len(l) > 0: # turn l into a list of sets of internally compatible candidate # relations in this group l = list(combine_small(l, len(end_vertices0))) candidate_relations.append(l) if len(candidate_relations) == 0: return self.print_debug("candidate_relations: %s" % candidate_relations) def combine_big(pos=0): """Iterate over all possible sets of relations""" # pos is an index in candidate_relations crs = candidate_relations[pos] if pos == len(candidate_relations)-1: for relations in crs: yield relations else: for tail in combine_big(pos+1): for relations in crs: yield relations + tail # final loop for new_relations in combine_big(): new_relations = set(new_relations) self.print_debug("new_relations: %s" % (new_relations, )) # check the total number of new relations if len(new_relations) != num_new_relations: continue # check sanity of relations forward = dict(new_relations) if len(forward) != num_new_relations: continue reverse = dict((j, i) for i, j in new_relations) if len(reverse) != num_new_relations: continue # check the constraints for a0, b0 in constraints0: if forward[a0] not in subject_graph.neighbors[forward[b0]]: forward = None break if forward is None: continue yield forward
[ "def", "_iter_new_relations", "(", "self", ",", "init_match", ",", "subject_graph", ",", "edges0", ",", "constraints0", ",", "edges1", ")", ":", "# Count the number of unique edges0[i][1] values. This is also", "# the number of new relations.", "num_new_relations", "=", "len", "(", "set", "(", "j", "for", "i", ",", "j", "in", "edges0", ")", ")", "def", "combine_small", "(", "relations", ",", "num", ")", ":", "\"\"\"iterate over all compatible combinations within one set of relations\"\"\"", "if", "len", "(", "relations", ")", "==", "0", ":", "return", "for", "i", ",", "pivot", "in", "enumerate", "(", "relations", ")", ":", "if", "num", "==", "1", ":", "yield", "(", "pivot", ",", ")", "else", ":", "compatible_relations", "=", "list", "(", "item", "for", "item", "in", "relations", "[", ":", "i", "]", "if", "pivot", "[", "0", "]", "!=", "item", "[", "0", "]", "and", "pivot", "[", "1", "]", "!=", "item", "[", "1", "]", ")", "for", "tail", "in", "combine_small", "(", "compatible_relations", ",", "num", "-", "1", ")", ":", "yield", "(", "pivot", ",", ")", "+", "tail", "# generate candidate relations", "candidate_relations", "=", "[", "]", "icg", "=", "self", ".", "_iter_candidate_groups", "(", "init_match", ",", "edges0", ",", "edges1", ")", "for", "end_vertices0", ",", "end_vertices1", "in", "icg", ":", "if", "len", "(", "end_vertices0", ")", ">", "len", "(", "end_vertices1", ")", ":", "return", "# this can never work, the subject graph is 'too small'", "elif", "not", "self", ".", "pattern", ".", "sub", "and", "len", "(", "end_vertices0", ")", "!=", "len", "(", "end_vertices1", ")", ":", "return", "# an exact match is sought, this can never work", "l", "=", "[", "]", "for", "end_vertex0", "in", "end_vertices0", ":", "for", "end_vertex1", "in", "end_vertices1", ":", "if", "self", ".", "pattern", ".", "compare", "(", "end_vertex0", ",", "end_vertex1", ",", "subject_graph", ")", ":", "l", ".", "append", "(", "(", "end_vertex0", ",", "end_vertex1", ")", ")", "# len(end_vertices0) = the total number of relations that must be", "# made in this group", "if", "len", "(", "l", ")", ">", "0", ":", "# turn l into a list of sets of internally compatible candidate", "# relations in this group", "l", "=", "list", "(", "combine_small", "(", "l", ",", "len", "(", "end_vertices0", ")", ")", ")", "candidate_relations", ".", "append", "(", "l", ")", "if", "len", "(", "candidate_relations", ")", "==", "0", ":", "return", "self", ".", "print_debug", "(", "\"candidate_relations: %s\"", "%", "candidate_relations", ")", "def", "combine_big", "(", "pos", "=", "0", ")", ":", "\"\"\"Iterate over all possible sets of relations\"\"\"", "# pos is an index in candidate_relations", "crs", "=", "candidate_relations", "[", "pos", "]", "if", "pos", "==", "len", "(", "candidate_relations", ")", "-", "1", ":", "for", "relations", "in", "crs", ":", "yield", "relations", "else", ":", "for", "tail", "in", "combine_big", "(", "pos", "+", "1", ")", ":", "for", "relations", "in", "crs", ":", "yield", "relations", "+", "tail", "# final loop", "for", "new_relations", "in", "combine_big", "(", ")", ":", "new_relations", "=", "set", "(", "new_relations", ")", "self", ".", "print_debug", "(", "\"new_relations: %s\"", "%", "(", "new_relations", ",", ")", ")", "# check the total number of new relations", "if", "len", "(", "new_relations", ")", "!=", "num_new_relations", ":", "continue", "# check sanity of relations", "forward", "=", "dict", "(", "new_relations", ")", "if", "len", "(", "forward", ")", "!=", "num_new_relations", ":", 
"continue", "reverse", "=", "dict", "(", "(", "j", ",", "i", ")", "for", "i", ",", "j", "in", "new_relations", ")", "if", "len", "(", "reverse", ")", "!=", "num_new_relations", ":", "continue", "# check the constraints", "for", "a0", ",", "b0", "in", "constraints0", ":", "if", "forward", "[", "a0", "]", "not", "in", "subject_graph", ".", "neighbors", "[", "forward", "[", "b0", "]", "]", ":", "forward", "=", "None", "break", "if", "forward", "is", "None", ":", "continue", "yield", "forward" ]
Given an onset for a match, iterate over all possible new key-value pairs
[ "Given", "an", "onset", "for", "a", "match", "iterate", "over", "all", "possible", "new", "key", "-", "value", "pairs" ]
train
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L1615-L1694
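The recursive `combine_small` helper above enumerates every set of `num` candidate pairs that is injective on both sides (no pattern vertex and no subject vertex used twice). A standalone rendition with toy data; the pair list is made up for illustration:

def combine_small(relations, num):
    # Yield all size-num tuples of pairs sharing no first or second element.
    if len(relations) == 0:
        return
    for i, pivot in enumerate(relations):
        if num == 1:
            yield (pivot,)
        else:
            # only earlier items are combined with the pivot, so every
            # combination is generated exactly once
            compatible = [item for item in relations[:i]
                          if pivot[0] != item[0] and pivot[1] != item[1]]
            for tail in combine_small(compatible, num - 1):
                yield (pivot,) + tail

# Pattern vertices {1, 2} each have subject candidates {11, 12}:
pairs = [(1, 11), (1, 12), (2, 11), (2, 12)]
print(sorted(combine_small(pairs, 2)))
# -> [((2, 11), (1, 12)), ((2, 12), (1, 11))]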
molmod/molmod
molmod/graphs.py
GraphSearch._iter_matches
def _iter_matches(self, input_match, subject_graph, one_match, level=0): """Given an onset for a match, iterate over all completions of that match This iterator works recursively. At each level the match is extended with a new set of relations based on vertices in the pattern graph that are at a distance 'level' from the starting vertex """ self.print_debug("ENTERING _ITER_MATCHES", 1) self.print_debug("input_match: %s" % input_match) # A) collect the new edges in the pattern graph and the subject graph # to extend the match. # # Note that the edges are ordered. edge[0] is always in the match. # edge[1] is never in the match. The constraints contain information # about the end points of edges0. It is a list of two-tuples where # (a, b) means that a and b must be connected. # # Second note: suffix 0 indicates the pattern graph and suffix 1 # is used for the subject graph. edges0, constraints0 = self.pattern.get_new_edges(level) edges1 = input_match.get_new_edges(subject_graph) self.print_debug("edges0: %s" % edges0) self.print_debug("constraints0: %s" % constraints0) self.print_debug("edges1: %s" % edges1) # B) iterate over the sets of new relations: [(vertex0[i], vertex1[j]), # ...] that contain all endpoints of edges0, that satisfy the # constraints0 and where (vertex0[i], vertex1[j]) only occurs if these # are end points of a edge0 and edge1 whose starting points are already # in init_match. These conditions are implemented in an iterator as to # separate concerns. This iterator also calls the routines that check # whether vertex1[j] also satisfies additional conditions inherent # vertex0[i]. inr = self._iter_new_relations(input_match, subject_graph, edges0, constraints0, edges1) for new_relations in inr: # for each set of new_relations, construct a next_match and recurse next_match = input_match.copy_with_new_relations(new_relations) if not self.pattern.check_next_match(next_match, new_relations, subject_graph, one_match): continue if self.pattern.complete(next_match, subject_graph): yield next_match else: for match in self._iter_matches(next_match, subject_graph, one_match, level+1): yield match self.print_debug("LEAVING_ITER_MATCHES", -1)
python
def _iter_matches(self, input_match, subject_graph, one_match, level=0): """Given an onset for a match, iterate over all completions of that match This iterator works recursively. At each level the match is extended with a new set of relations based on vertices in the pattern graph that are at a distance 'level' from the starting vertex """ self.print_debug("ENTERING _ITER_MATCHES", 1) self.print_debug("input_match: %s" % input_match) # A) collect the new edges in the pattern graph and the subject graph # to extend the match. # # Note that the edges are ordered. edge[0] is always in the match. # edge[1] is never in the match. The constraints contain information # about the end points of edges0. It is a list of two-tuples where # (a, b) means that a and b must be connected. # # Second note: suffix 0 indicates the pattern graph and suffix 1 # is used for the subject graph. edges0, constraints0 = self.pattern.get_new_edges(level) edges1 = input_match.get_new_edges(subject_graph) self.print_debug("edges0: %s" % edges0) self.print_debug("constraints0: %s" % constraints0) self.print_debug("edges1: %s" % edges1) # B) iterate over the sets of new relations: [(vertex0[i], vertex1[j]), # ...] that contain all endpoints of edges0, that satisfy the # constraints0 and where (vertex0[i], vertex1[j]) only occurs if these # are end points of a edge0 and edge1 whose starting points are already # in init_match. These conditions are implemented in an iterator as to # separate concerns. This iterator also calls the routines that check # whether vertex1[j] also satisfies additional conditions inherent # vertex0[i]. inr = self._iter_new_relations(input_match, subject_graph, edges0, constraints0, edges1) for new_relations in inr: # for each set of new_relations, construct a next_match and recurse next_match = input_match.copy_with_new_relations(new_relations) if not self.pattern.check_next_match(next_match, new_relations, subject_graph, one_match): continue if self.pattern.complete(next_match, subject_graph): yield next_match else: for match in self._iter_matches(next_match, subject_graph, one_match, level+1): yield match self.print_debug("LEAVING_ITER_MATCHES", -1)
[ "def", "_iter_matches", "(", "self", ",", "input_match", ",", "subject_graph", ",", "one_match", ",", "level", "=", "0", ")", ":", "self", ".", "print_debug", "(", "\"ENTERING _ITER_MATCHES\"", ",", "1", ")", "self", ".", "print_debug", "(", "\"input_match: %s\"", "%", "input_match", ")", "# A) collect the new edges in the pattern graph and the subject graph", "# to extend the match.", "#", "# Note that the edges are ordered. edge[0] is always in the match.", "# edge[1] is never in the match. The constraints contain information", "# about the end points of edges0. It is a list of two-tuples where", "# (a, b) means that a and b must be connected.", "#", "# Second note: suffix 0 indicates the pattern graph and suffix 1", "# is used for the subject graph.", "edges0", ",", "constraints0", "=", "self", ".", "pattern", ".", "get_new_edges", "(", "level", ")", "edges1", "=", "input_match", ".", "get_new_edges", "(", "subject_graph", ")", "self", ".", "print_debug", "(", "\"edges0: %s\"", "%", "edges0", ")", "self", ".", "print_debug", "(", "\"constraints0: %s\"", "%", "constraints0", ")", "self", ".", "print_debug", "(", "\"edges1: %s\"", "%", "edges1", ")", "# B) iterate over the sets of new relations: [(vertex0[i], vertex1[j]),", "# ...] that contain all endpoints of edges0, that satisfy the", "# constraints0 and where (vertex0[i], vertex1[j]) only occurs if these", "# are end points of a edge0 and edge1 whose starting points are already", "# in init_match. These conditions are implemented in an iterator as to", "# separate concerns. This iterator also calls the routines that check", "# whether vertex1[j] also satisfies additional conditions inherent", "# vertex0[i].", "inr", "=", "self", ".", "_iter_new_relations", "(", "input_match", ",", "subject_graph", ",", "edges0", ",", "constraints0", ",", "edges1", ")", "for", "new_relations", "in", "inr", ":", "# for each set of new_relations, construct a next_match and recurse", "next_match", "=", "input_match", ".", "copy_with_new_relations", "(", "new_relations", ")", "if", "not", "self", ".", "pattern", ".", "check_next_match", "(", "next_match", ",", "new_relations", ",", "subject_graph", ",", "one_match", ")", ":", "continue", "if", "self", ".", "pattern", ".", "complete", "(", "next_match", ",", "subject_graph", ")", ":", "yield", "next_match", "else", ":", "for", "match", "in", "self", ".", "_iter_matches", "(", "next_match", ",", "subject_graph", ",", "one_match", ",", "level", "+", "1", ")", ":", "yield", "match", "self", ".", "print_debug", "(", "\"LEAVING_ITER_MATCHES\"", ",", "-", "1", ")" ]
Given an onset for a match, iterate over all completions of that match This iterator works recursively. At each level the match is extended with a new set of relations based on vertices in the pattern graph that are at a distance 'level' from the starting vertex
[ "Given", "an", "onset", "for", "a", "match", "iterate", "over", "all", "completions", "of", "that", "match" ]
train
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L1696-L1741
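In normal use these private iterators are driven through `GraphSearch`'s call interface, as in `full_match` earlier in this file. A hedged usage sketch; it assumes `molmod.graphs.Graph` can be built from a list of edge tuples, which is consistent with this file but not shown in this excerpt:

from molmod.graphs import Graph, EqualPattern, GraphSearch

# Two triangles with different vertex numbering; an EqualPattern search
# recovers a vertex-to-vertex correspondence between them.
triangle_a = Graph([(0, 1), (1, 2), (2, 0)])
triangle_b = Graph([(2, 1), (1, 0), (0, 2)])

search = GraphSearch(EqualPattern(triangle_a))
for match in search(triangle_b, one_match=True):
    print(match.forward)  # one of the valid vertex maps, e.g. {0: 0, 1: 1, 2: 2}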
molmod/molmod
molmod/io/pdb.py
dump_pdb
def dump_pdb(filename, molecule, atomnames=None, resnames=None, chain_ids=None, occupancies=None, betas=None): """Writes a single molecule to a pdb file. This function is based on the pdb file specification: http://www.wwpdb.org/documentation/format32/sect9.html For convenience, the relevant table is copied and the character indexes are transformed to C-style (starting from zero) ======= ============ ========== ========================================== COLUMNS DATA TYPE FIELD DEFINITION ======= ============ ========== ========================================== 0 - 5 Record name "ATOM " 6 - 10 Integer serial Atom serial number. 12 - 15 Atom name Atom name. 16 Character altLoc Alternate location indicator. 17 - 19 Residue name resName Residue name. 21 Character chainID Chain identifier. 22 - 25 Integer resSeq Residue sequence number. 26 AChar iCode Code for insertion of residues. 30 - 37 Real(8.3) x Orthogonal coordinates for X in Angstroms. 38 - 45 Real(8.3) y Orthogonal coordinates for Y in Angstroms. 46 - 53 Real(8.3) z Orthogonal coordinates for Z in Angstroms. 54 - 59 Real(6.2) occupancy Occupancy. 60 - 65 Real(6.2) tempFactor Temperature factor. 76 - 77 LString(2) element Element symbol, right-justified. 78 - 79 LString(2) charge Charge on the atom. ======= ============ ========== ========================================== """ with open(filename, "w") as f: res_id = 1 old_resname = None for i in range(molecule.size): symbol = periodic[molecule.numbers[i]].symbol if atomnames is None: atomname = symbol else: atomname = atomnames[i] if resnames is None: resname = "OXO" else: resname = resnames[i] if resname != old_resname: res_id += 1 if chain_ids is None: chain_id = "A" else: chain_id = chain_ids[i] if occupancies is None: occupancy = 1.0 else: occupancy = occupancies[i] if betas is None: beta = 1.0 else: beta = betas[i] print("ATOM %4i %3s %3s %1s%4i %8.3f%8.3f%8.3f%6.2f%6.2f %2s " % ( i+1, atomname.ljust(3), resname.ljust(3), chain_id, res_id, molecule.coordinates[i, 0]/angstrom, molecule.coordinates[i, 1]/angstrom, molecule.coordinates[i, 2]/angstrom, occupancy, beta, symbol.ljust(2) ), file=f) old_resname = resname
python
def dump_pdb(filename, molecule, atomnames=None, resnames=None, chain_ids=None, occupancies=None, betas=None): """Writes a single molecule to a pdb file. This function is based on the pdb file specification: http://www.wwpdb.org/documentation/format32/sect9.html For convenience, the relevant table is copied and the character indexes are transformed to C-style (starting from zero) ======= ============ ========== ========================================== COLUMNS DATA TYPE FIELD DEFINITION ======= ============ ========== ========================================== 0 - 5 Record name "ATOM " 6 - 10 Integer serial Atom serial number. 12 - 15 Atom name Atom name. 16 Character altLoc Alternate location indicator. 17 - 19 Residue name resName Residue name. 21 Character chainID Chain identifier. 22 - 25 Integer resSeq Residue sequence number. 26 AChar iCode Code for insertion of residues. 30 - 37 Real(8.3) x Orthogonal coordinates for X in Angstroms. 38 - 45 Real(8.3) y Orthogonal coordinates for Y in Angstroms. 46 - 53 Real(8.3) z Orthogonal coordinates for Z in Angstroms. 54 - 59 Real(6.2) occupancy Occupancy. 60 - 65 Real(6.2) tempFactor Temperature factor. 76 - 77 LString(2) element Element symbol, right-justified. 78 - 79 LString(2) charge Charge on the atom. ======= ============ ========== ========================================== """ with open(filename, "w") as f: res_id = 1 old_resname = None for i in range(molecule.size): symbol = periodic[molecule.numbers[i]].symbol if atomnames is None: atomname = symbol else: atomname = atomnames[i] if resnames is None: resname = "OXO" else: resname = resnames[i] if resname != old_resname: res_id += 1 if chain_ids is None: chain_id = "A" else: chain_id = chain_ids[i] if occupancies is None: occupancy = 1.0 else: occupancy = occupancies[i] if betas is None: beta = 1.0 else: beta = betas[i] print("ATOM %4i %3s %3s %1s%4i %8.3f%8.3f%8.3f%6.2f%6.2f %2s " % ( i+1, atomname.ljust(3), resname.ljust(3), chain_id, res_id, molecule.coordinates[i, 0]/angstrom, molecule.coordinates[i, 1]/angstrom, molecule.coordinates[i, 2]/angstrom, occupancy, beta, symbol.ljust(2) ), file=f) old_resname = resname
[ "def", "dump_pdb", "(", "filename", ",", "molecule", ",", "atomnames", "=", "None", ",", "resnames", "=", "None", ",", "chain_ids", "=", "None", ",", "occupancies", "=", "None", ",", "betas", "=", "None", ")", ":", "with", "open", "(", "filename", ",", "\"w\"", ")", "as", "f", ":", "res_id", "=", "1", "old_resname", "=", "None", "for", "i", "in", "range", "(", "molecule", ".", "size", ")", ":", "symbol", "=", "periodic", "[", "molecule", ".", "numbers", "[", "i", "]", "]", ".", "symbol", "if", "atomnames", "is", "None", ":", "atomname", "=", "symbol", "else", ":", "atomname", "=", "atomnames", "[", "i", "]", "if", "resnames", "is", "None", ":", "resname", "=", "\"OXO\"", "else", ":", "resname", "=", "resnames", "[", "i", "]", "if", "resname", "!=", "old_resname", ":", "res_id", "+=", "1", "if", "chain_ids", "is", "None", ":", "chain_id", "=", "\"A\"", "else", ":", "chain_id", "=", "chain_ids", "[", "i", "]", "if", "occupancies", "is", "None", ":", "occupancy", "=", "1.0", "else", ":", "occupancy", "=", "occupancies", "[", "i", "]", "if", "betas", "is", "None", ":", "beta", "=", "1.0", "else", ":", "beta", "=", "betas", "[", "i", "]", "print", "(", "\"ATOM %4i %3s %3s %1s%4i %8.3f%8.3f%8.3f%6.2f%6.2f %2s \"", "%", "(", "i", "+", "1", ",", "atomname", ".", "ljust", "(", "3", ")", ",", "resname", ".", "ljust", "(", "3", ")", ",", "chain_id", ",", "res_id", ",", "molecule", ".", "coordinates", "[", "i", ",", "0", "]", "/", "angstrom", ",", "molecule", ".", "coordinates", "[", "i", ",", "1", "]", "/", "angstrom", ",", "molecule", ".", "coordinates", "[", "i", ",", "2", "]", "/", "angstrom", ",", "occupancy", ",", "beta", ",", "symbol", ".", "ljust", "(", "2", ")", ")", ",", "file", "=", "f", ")", "old_resname", "=", "resname" ]
Writes a single molecule to a pdb file. This function is based on the pdb file specification: http://www.wwpdb.org/documentation/format32/sect9.html For convenience, the relevant table is copied and the character indexes are transformed to C-style (starting from zero) ======= ============ ========== ========================================== COLUMNS DATA TYPE FIELD DEFINITION ======= ============ ========== ========================================== 0 - 5 Record name "ATOM " 6 - 10 Integer serial Atom serial number. 12 - 15 Atom name Atom name. 16 Character altLoc Alternate location indicator. 17 - 19 Residue name resName Residue name. 21 Character chainID Chain identifier. 22 - 25 Integer resSeq Residue sequence number. 26 AChar iCode Code for insertion of residues. 30 - 37 Real(8.3) x Orthogonal coordinates for X in Angstroms. 38 - 45 Real(8.3) y Orthogonal coordinates for Y in Angstroms. 46 - 53 Real(8.3) z Orthogonal coordinates for Z in Angstroms. 54 - 59 Real(6.2) occupancy Occupancy. 60 - 65 Real(6.2) tempFactor Temperature factor. 76 - 77 LString(2) element Element symbol, right-justified. 78 - 79 LString(2) charge Charge on the atom. ======= ============ ========== ==========================================
[ "Writes", "a", "single", "molecule", "to", "a", "pdb", "file", "." ]
train
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/pdb.py#L40-L105
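A short usage sketch for `dump_pdb`. It assumes `Molecule` from `molmod.molecules` and the `angstrom` unit from `molmod.units`; coordinates are stored internally in atomic units, which is why the writer divides by `angstrom`:

import numpy as np
from molmod.molecules import Molecule
from molmod.units import angstrom
from molmod.io.pdb import dump_pdb

# A water molecule; positions entered in Angstrom, converted to atomic units.
numbers = [8, 1, 1]
coordinates = np.array([
    [ 0.000, 0.000, 0.000],
    [ 0.757, 0.586, 0.000],
    [-0.757, 0.586, 0.000],
]) * angstrom
dump_pdb("water.pdb", Molecule(numbers, coordinates), resnames=["HOH"] * 3)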
molmod/molmod
molmod/io/pdb.py
load_pdb
def load_pdb(filename): """Loads a single molecule from a pdb file. This function supports only a small fragment of the pdb specification. It assumes that there is only one molecular geometry in the pdb file. """ with open(filename) as f: numbers = [] coordinates = [] occupancies = [] betas = [] for line in f: if line.startswith("ATOM"): symbol = line[76:78].strip() numbers.append(periodic[symbol].number) coordinates.append([float(line[30:38])*angstrom, float(line[38:46])*angstrom, float(line[46:54])*angstrom]) occupancies.append(float(line[54:60])) betas.append(float(line[60:66])) if len(numbers) > 0: molecule = Molecule(numbers, coordinates) molecule.occupancies = np.array(occupancies) molecule.betas = np.array(betas) return molecule else: raise FileFormatError("No molecule found in pdb file %s" % filename)
python
def load_pdb(filename): """Loads a single molecule from a pdb file. This function supports only a small fragment of the pdb specification. It assumes that there is only one molecular geometry in the pdb file. """ with open(filename) as f: numbers = [] coordinates = [] occupancies = [] betas = [] for line in f: if line.startswith("ATOM"): symbol = line[76:78].strip() numbers.append(periodic[symbol].number) coordinates.append([float(line[30:38])*angstrom, float(line[38:46])*angstrom, float(line[46:54])*angstrom]) occupancies.append(float(line[54:60])) betas.append(float(line[60:66])) if len(numbers) > 0: molecule = Molecule(numbers, coordinates) molecule.occupancies = np.array(occupancies) molecule.betas = np.array(betas) return molecule else: raise FileFormatError("No molecule found in pdb file %s" % filename)
[ "def", "load_pdb", "(", "filename", ")", ":", "with", "open", "(", "filename", ")", "as", "f", ":", "numbers", "=", "[", "]", "coordinates", "=", "[", "]", "occupancies", "=", "[", "]", "betas", "=", "[", "]", "for", "line", "in", "f", ":", "if", "line", ".", "startswith", "(", "\"ATOM\"", ")", ":", "symbol", "=", "line", "[", "76", ":", "78", "]", ".", "strip", "(", ")", "numbers", ".", "append", "(", "periodic", "[", "symbol", "]", ".", "number", ")", "coordinates", ".", "append", "(", "[", "float", "(", "line", "[", "30", ":", "38", "]", ")", "*", "angstrom", ",", "float", "(", "line", "[", "38", ":", "46", "]", ")", "*", "angstrom", ",", "float", "(", "line", "[", "46", ":", "54", "]", ")", "*", "angstrom", "]", ")", "occupancies", ".", "append", "(", "float", "(", "line", "[", "54", ":", "60", "]", ")", ")", "betas", ".", "append", "(", "float", "(", "line", "[", "60", ":", "66", "]", ")", ")", "if", "len", "(", "numbers", ")", ">", "0", ":", "molecule", "=", "Molecule", "(", "numbers", ",", "coordinates", ")", "molecule", ".", "occupancies", "=", "np", ".", "array", "(", "occupancies", ")", "molecule", ".", "betas", "=", "np", ".", "array", "(", "betas", ")", "return", "molecule", "else", ":", "raise", "FileFormatError", "(", "\"No molecule found in pdb file %s\"", "%", "filename", ")" ]
Loads a single molecule from a pdb file. This function supports only a small fragment of the pdb specification. It assumes that there is only one molecular geometry in the pdb file.
[ "Loads", "a", "single", "molecule", "from", "a", "pdb", "file", "." ]
train
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/pdb.py#L108-L132
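Reading the file back with `load_pdb` returns a `Molecule` with `occupancies` and `betas` attached as numpy arrays. Continuing the hypothetical water.pdb from the previous sketch:

from molmod.io.pdb import load_pdb
from molmod.units import angstrom

mol = load_pdb("water.pdb")
print(mol.numbers)                 # [8 1 1]
print(mol.coordinates / angstrom)  # positions back in Angstrom
print(mol.occupancies, mol.betas)  # the defaults written by dump_pdb: all 1.0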
molmod/molmod
molmod/zmatrix.py
zmat_to_cart
def zmat_to_cart(zmat): """Converts a ZMatrix back to cartesian coordinates.""" numbers = zmat["number"] N = len(numbers) coordinates = np.zeros((N, 3), float) # special cases for the first coordinates coordinates[1, 2] = zmat["distance"][1] if zmat["rel1"][2] == 1: sign = -1 else: sign = 1 coordinates[2, 2] = zmat["distance"][2]*sign*np.cos(zmat["angle"][2]) coordinates[2, 1] = zmat["distance"][2]*sign*np.sin(zmat["angle"][2]) coordinates[2] += coordinates[2-zmat["rel1"][2]] ref0 = 3 for (number, distance, rel1, angle, rel2, dihed, rel3) in zmat[3:]: ref1 = ref0 - rel1 ref2 = ref0 - rel2 ref3 = ref0 - rel3 if ref1 < 0: ref1 = 0 if ref2 < 0: ref2 = 0 if ref3 < 0: ref3 = 0 # define frame axes origin = coordinates[ref1] new_z = coordinates[ref2] - origin norm_z = np.linalg.norm(new_z) if norm_z < 1e-15: new_z = np.array([0, 0, 1], float) else: new_z /= np.linalg.norm(new_z) new_x = coordinates[ref3] - origin new_x -= np.dot(new_x, new_z)*new_z norm_x = np.linalg.norm(new_x) if norm_x < 1e-15: new_x = random_orthonormal(new_z) else: new_x /= np.linalg.norm(new_x) # we must make our axes frame left handed due to the poor IUPAC # definition of the sign of a dihedral angle. new_y = -np.cross(new_z, new_x) # coordinates of new atom: x = distance*np.cos(dihed)*np.sin(angle) y = distance*np.sin(dihed)*np.sin(angle) z = distance*np.cos(angle) coordinates[ref0] = origin + x*new_x + y*new_y + z*new_z # loop ref0 += 1 return numbers, coordinates
python
def zmat_to_cart(zmat): """Converts a ZMatrix back to cartesian coordinates.""" numbers = zmat["number"] N = len(numbers) coordinates = np.zeros((N, 3), float) # special cases for the first coordinates coordinates[1, 2] = zmat["distance"][1] if zmat["rel1"][2] == 1: sign = -1 else: sign = 1 coordinates[2, 2] = zmat["distance"][2]*sign*np.cos(zmat["angle"][2]) coordinates[2, 1] = zmat["distance"][2]*sign*np.sin(zmat["angle"][2]) coordinates[2] += coordinates[2-zmat["rel1"][2]] ref0 = 3 for (number, distance, rel1, angle, rel2, dihed, rel3) in zmat[3:]: ref1 = ref0 - rel1 ref2 = ref0 - rel2 ref3 = ref0 - rel3 if ref1 < 0: ref1 = 0 if ref2 < 0: ref2 = 0 if ref3 < 0: ref3 = 0 # define frame axes origin = coordinates[ref1] new_z = coordinates[ref2] - origin norm_z = np.linalg.norm(new_z) if norm_z < 1e-15: new_z = np.array([0, 0, 1], float) else: new_z /= np.linalg.norm(new_z) new_x = coordinates[ref3] - origin new_x -= np.dot(new_x, new_z)*new_z norm_x = np.linalg.norm(new_x) if norm_x < 1e-15: new_x = random_orthonormal(new_z) else: new_x /= np.linalg.norm(new_x) # we must make our axes frame left handed due to the poor IUPAC # definition of the sign of a dihedral angle. new_y = -np.cross(new_z, new_x) # coordinates of new atom: x = distance*np.cos(dihed)*np.sin(angle) y = distance*np.sin(dihed)*np.sin(angle) z = distance*np.cos(angle) coordinates[ref0] = origin + x*new_x + y*new_y + z*new_z # loop ref0 += 1 return numbers, coordinates
[ "def", "zmat_to_cart", "(", "zmat", ")", ":", "numbers", "=", "zmat", "[", "\"number\"", "]", "N", "=", "len", "(", "numbers", ")", "coordinates", "=", "np", ".", "zeros", "(", "(", "N", ",", "3", ")", ",", "float", ")", "# special cases for the first coordinates", "coordinates", "[", "1", ",", "2", "]", "=", "zmat", "[", "\"distance\"", "]", "[", "1", "]", "if", "zmat", "[", "\"rel1\"", "]", "[", "2", "]", "==", "1", ":", "sign", "=", "-", "1", "else", ":", "sign", "=", "1", "coordinates", "[", "2", ",", "2", "]", "=", "zmat", "[", "\"distance\"", "]", "[", "2", "]", "*", "sign", "*", "np", ".", "cos", "(", "zmat", "[", "\"angle\"", "]", "[", "2", "]", ")", "coordinates", "[", "2", ",", "1", "]", "=", "zmat", "[", "\"distance\"", "]", "[", "2", "]", "*", "sign", "*", "np", ".", "sin", "(", "zmat", "[", "\"angle\"", "]", "[", "2", "]", ")", "coordinates", "[", "2", "]", "+=", "coordinates", "[", "2", "-", "zmat", "[", "\"rel1\"", "]", "[", "2", "]", "]", "ref0", "=", "3", "for", "(", "number", ",", "distance", ",", "rel1", ",", "angle", ",", "rel2", ",", "dihed", ",", "rel3", ")", "in", "zmat", "[", "3", ":", "]", ":", "ref1", "=", "ref0", "-", "rel1", "ref2", "=", "ref0", "-", "rel2", "ref3", "=", "ref0", "-", "rel3", "if", "ref1", "<", "0", ":", "ref1", "=", "0", "if", "ref2", "<", "0", ":", "ref2", "=", "0", "if", "ref3", "<", "0", ":", "ref3", "=", "0", "# define frame axes", "origin", "=", "coordinates", "[", "ref1", "]", "new_z", "=", "coordinates", "[", "ref2", "]", "-", "origin", "norm_z", "=", "np", ".", "linalg", ".", "norm", "(", "new_z", ")", "if", "norm_z", "<", "1e-15", ":", "new_z", "=", "np", ".", "array", "(", "[", "0", ",", "0", ",", "1", "]", ",", "float", ")", "else", ":", "new_z", "/=", "np", ".", "linalg", ".", "norm", "(", "new_z", ")", "new_x", "=", "coordinates", "[", "ref3", "]", "-", "origin", "new_x", "-=", "np", ".", "dot", "(", "new_x", ",", "new_z", ")", "*", "new_z", "norm_x", "=", "np", ".", "linalg", ".", "norm", "(", "new_x", ")", "if", "norm_x", "<", "1e-15", ":", "new_x", "=", "random_orthonormal", "(", "new_z", ")", "else", ":", "new_x", "/=", "np", ".", "linalg", ".", "norm", "(", "new_x", ")", "# we must make our axes frame left handed due to the poor IUPAC", "# definition of the sign of a dihedral angle.", "new_y", "=", "-", "np", ".", "cross", "(", "new_z", ",", "new_x", ")", "# coordinates of new atom:", "x", "=", "distance", "*", "np", ".", "cos", "(", "dihed", ")", "*", "np", ".", "sin", "(", "angle", ")", "y", "=", "distance", "*", "np", ".", "sin", "(", "dihed", ")", "*", "np", ".", "sin", "(", "angle", ")", "z", "=", "distance", "*", "np", ".", "cos", "(", "angle", ")", "coordinates", "[", "ref0", "]", "=", "origin", "+", "x", "*", "new_x", "+", "y", "*", "new_y", "+", "z", "*", "new_z", "# loop", "ref0", "+=", "1", "return", "numbers", ",", "coordinates" ]
Converts a ZMatrix back to cartesian coordinates.
[ "Converts", "a", "ZMatrix", "back", "to", "cartesian", "coordinates", "." ]
train
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/zmatrix.py#L157-L209
molmod/molmod
molmod/zmatrix.py
ZMatrixGenerator._get_new_ref
def _get_new_ref(self, existing_refs): """Get a new reference atom for a row in the ZMatrix The reference atoms should obey the following conditions: - They must be different - They must be neighbours in the bond graph - They must have an index lower than the current atom If multiple candidate refs can be found, take the heaviest atom """ # ref0 is the atom whose position is defined by the current row in the # zmatrix. ref0 = existing_refs[0] for ref in existing_refs: # try to find a neighbor of the ref that can serve as the new ref result = None for n in sorted(self.graph.neighbors[ref]): if self.new_index[n] > self.new_index[ref0]: # index is too high, zmatrix rows can't refer to future # atoms continue if n in existing_refs: # ref is already in use continue if result is None or self.graph.numbers[n] >= self.graph.numbers[result]: # acceptable ref, prefer heaviest atom result = n if result is not None: return result raise RuntimeError("Could not find new reference.")
python
def _get_new_ref(self, existing_refs): """Get a new reference atom for a row in the ZMatrix The reference atoms should obey the following conditions: - They must be different - They must be neighbours in the bond graph - They must have an index lower than the current atom If multiple candidate refs can be found, take the heaviest atom """ # ref0 is the atom whose position is defined by the current row in the # zmatrix. ref0 = existing_refs[0] for ref in existing_refs: # try to find a neighbor of the ref that can serve as the new ref result = None for n in sorted(self.graph.neighbors[ref]): if self.new_index[n] > self.new_index[ref0]: # index is too high, zmatrix rows can't refer to future # atoms continue if n in existing_refs: # ref is already in use continue if result is None or self.graph.numbers[n] >= self.graph.numbers[result]: # acceptable ref, prefer heaviest atom result = n if result is not None: return result raise RuntimeError("Could not find new reference.")
[ "def", "_get_new_ref", "(", "self", ",", "existing_refs", ")", ":", "# ref0 is the atom whose position is defined by the current row in the", "# zmatrix.", "ref0", "=", "existing_refs", "[", "0", "]", "for", "ref", "in", "existing_refs", ":", "# try to find a neighbor of the ref that can serve as the new ref", "result", "=", "None", "for", "n", "in", "sorted", "(", "self", ".", "graph", ".", "neighbors", "[", "ref", "]", ")", ":", "if", "self", ".", "new_index", "[", "n", "]", ">", "self", ".", "new_index", "[", "ref0", "]", ":", "# index is too high, zmatrix rows can't refer to future", "# atoms", "continue", "if", "n", "in", "existing_refs", ":", "# ref is already in use", "continue", "if", "result", "is", "None", "or", "self", ".", "graph", ".", "numbers", "[", "n", "]", "<=", "self", ".", "graph", ".", "numbers", "[", "result", "]", ":", "# acceptable ref, prefer heaviest atom", "result", "=", "n", "if", "result", "is", "not", "None", ":", "return", "result", "raise", "RuntimeError", "(", "\"Could not find new reference.\"", ")" ]
Get a new reference atom for a row in the ZMatrix The reference atoms should obey the following conditions: - They must be different - They must be neighbours in the bond graph - They must have an index lower than the current atom If multiple candidate refs can be found, take the heaviest atom
[ "Get", "a", "new", "reference", "atom", "for", "a", "row", "in", "the", "ZMatrix" ]
train
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/zmatrix.py#L89-L118
molmod/molmod
molmod/zmatrix.py
ZMatrixGenerator.cart_to_zmat
def cart_to_zmat(self, coordinates): """Convert cartesian coordinates to ZMatrix format Argument: coordinates -- Cartesian coordinates (numpy array Nx3) The coordinates must match with the graph that was used to initialize the ZMatrixGenerator object. """ N = len(self.graph.numbers) if coordinates.shape != (N, 3): raise ValueError("The shape of the coordinates must be (%i, 3)" % N) result = np.zeros(N, dtype=self.dtype) for i in range(N): ref0 = self.old_index[i] rel1 = -1 rel2 = -1 rel3 = -1 distance = 0 angle = 0 dihed = 0 if i > 0: ref1 = self._get_new_ref([ref0]) distance = np.linalg.norm(coordinates[ref0]-coordinates[ref1]) rel1 = i - self.new_index[ref1] if i > 1: ref2 = self._get_new_ref([ref0, ref1]) angle, = ic.bend_angle(coordinates[[ref0, ref1, ref2]]) rel2 = i - self.new_index[ref2] if i > 2: ref3 = self._get_new_ref([ref0, ref1, ref2]) dihed, = ic.dihed_angle(coordinates[[ref0, ref1, ref2, ref3]]) rel3 = i - self.new_index[ref3] result[i] = (self.graph.numbers[i], distance, rel1, angle, rel2, dihed, rel3) return result
python
def cart_to_zmat(self, coordinates): """Convert cartesian coordinates to ZMatrix format Argument: coordinates -- Cartesian coordinates (numpy array Nx3) The coordinates must match with the graph that was used to initialize the ZMatrixGenerator object. """ N = len(self.graph.numbers) if coordinates.shape != (N, 3): raise ValueError("The shape of the coordinates must be (%i, 3)" % N) result = np.zeros(N, dtype=self.dtype) for i in range(N): ref0 = self.old_index[i] rel1 = -1 rel2 = -1 rel3 = -1 distance = 0 angle = 0 dihed = 0 if i > 0: ref1 = self._get_new_ref([ref0]) distance = np.linalg.norm(coordinates[ref0]-coordinates[ref1]) rel1 = i - self.new_index[ref1] if i > 1: ref2 = self._get_new_ref([ref0, ref1]) angle, = ic.bend_angle(coordinates[[ref0, ref1, ref2]]) rel2 = i - self.new_index[ref2] if i > 2: ref3 = self._get_new_ref([ref0, ref1, ref2]) dihed, = ic.dihed_angle(coordinates[[ref0, ref1, ref2, ref3]]) rel3 = i - self.new_index[ref3] result[i] = (self.graph.numbers[i], distance, rel1, angle, rel2, dihed, rel3) return result
[ "def", "cart_to_zmat", "(", "self", ",", "coordinates", ")", ":", "N", "=", "len", "(", "self", ".", "graph", ".", "numbers", ")", "if", "coordinates", ".", "shape", "!=", "(", "N", ",", "3", ")", ":", "raise", "ValueError", "(", "\"The shape of the coordinates must be (%i, 3)\"", "%", "N", ")", "result", "=", "np", ".", "zeros", "(", "N", ",", "dtype", "=", "self", ".", "dtype", ")", "for", "i", "in", "range", "(", "N", ")", ":", "ref0", "=", "self", ".", "old_index", "[", "i", "]", "rel1", "=", "-", "1", "rel2", "=", "-", "1", "rel3", "=", "-", "1", "distance", "=", "0", "angle", "=", "0", "dihed", "=", "0", "if", "i", ">", "0", ":", "ref1", "=", "self", ".", "_get_new_ref", "(", "[", "ref0", "]", ")", "distance", "=", "np", ".", "linalg", ".", "norm", "(", "coordinates", "[", "ref0", "]", "-", "coordinates", "[", "ref1", "]", ")", "rel1", "=", "i", "-", "self", ".", "new_index", "[", "ref1", "]", "if", "i", ">", "1", ":", "ref2", "=", "self", ".", "_get_new_ref", "(", "[", "ref0", ",", "ref1", "]", ")", "angle", ",", "=", "ic", ".", "bend_angle", "(", "coordinates", "[", "[", "ref0", ",", "ref1", ",", "ref2", "]", "]", ")", "rel2", "=", "i", "-", "self", ".", "new_index", "[", "ref2", "]", "if", "i", ">", "2", ":", "ref3", "=", "self", ".", "_get_new_ref", "(", "[", "ref0", ",", "ref1", ",", "ref2", "]", ")", "dihed", ",", "=", "ic", ".", "dihed_angle", "(", "coordinates", "[", "[", "ref0", ",", "ref1", ",", "ref2", ",", "ref3", "]", "]", ")", "rel3", "=", "i", "-", "self", ".", "new_index", "[", "ref3", "]", "result", "[", "i", "]", "=", "(", "self", ".", "graph", ".", "numbers", "[", "i", "]", ",", "distance", ",", "rel1", ",", "angle", ",", "rel2", ",", "dihed", ",", "rel3", ")", "return", "result" ]
Convert cartesian coordinates to ZMatrix format Argument: coordinates -- Cartesian coordinates (numpy array Nx3) The coordinates must match with the graph that was used to initialize the ZMatrixGenerator object.
[ "Convert", "cartesian", "coordinates", "to", "ZMatrix", "format" ]
train
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/zmatrix.py#L120-L154
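A round-trip sketch tying `cart_to_zmat` and `zmat_to_cart` together. It assumes a `ZMatrixGenerator` is constructed from a molecular graph and that `Molecule.from_file` and `MolecularGraph.from_geometry` exist elsewhere in molmod; neither appears in this excerpt, so treat this as an outline rather than verified API:

from molmod.molecules import Molecule
from molmod.molecular_graphs import MolecularGraph
from molmod.zmatrix import ZMatrixGenerator, zmat_to_cart

mol = Molecule.from_file("some_molecule.xyz")  # hypothetical input file
graph = MolecularGraph.from_geometry(mol)      # bond graph from distances

gen = ZMatrixGenerator(graph)
zmat = gen.cart_to_zmat(mol.coordinates)   # internal coordinates
numbers, coordinates = zmat_to_cart(zmat)  # back to cartesian; the geometry
                                           # agrees up to overall rotation
                                           # and translation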
pettarin/ipapy
ipapy/mapper.py
Mapper.can_map_ipa_string
def can_map_ipa_string(self, ipa_string): """ Return ``True`` if the mapper can map all the IPA characters in the given IPA string. :param IPAString ipa_string: the IPAString to be parsed :rtype: bool """ canonical = [(c.canonical_representation, ) for c in ipa_string] split = split_using_dictionary(canonical, self, self.max_key_length, single_char_parsing=False) for sub in split: if not sub in self.ipa_canonical_representation_to_mapped_str: return False return True
python
def can_map_ipa_string(self, ipa_string): """ Return ``True`` if the mapper can map all the IPA characters in the given IPA string. :param IPAString ipa_string: the IPAString to be parsed :rtype: bool """ canonical = [(c.canonical_representation, ) for c in ipa_string] split = split_using_dictionary(canonical, self, self.max_key_length, single_char_parsing=False) for sub in split: if not sub in self.ipa_canonical_representation_to_mapped_str: return False return True
[ "def", "can_map_ipa_string", "(", "self", ",", "ipa_string", ")", ":", "canonical", "=", "[", "(", "c", ".", "canonical_representation", ",", ")", "for", "c", "in", "ipa_string", "]", "split", "=", "split_using_dictionary", "(", "canonical", ",", "self", ",", "self", ".", "max_key_length", ",", "single_char_parsing", "=", "False", ")", "for", "sub", "in", "split", ":", "if", "not", "sub", "in", "self", ".", "ipa_canonical_representation_to_mapped_str", ":", "return", "False", "return", "True" ]
Return ``True`` if the mapper can map all the IPA characters in the given IPA string. :param IPAString ipa_string: the IPAString to be parsed :rtype: bool
[ "Return", "True", "if", "the", "mapper", "can", "map", "all", "the", "IPA", "characters", "in", "the", "given", "IPA", "string", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/mapper.py#L93-L106
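The check-before-map pattern with a concrete mapper, sketched below. `ARPABETMapper` (used later by this package's CLI code) is a `Mapper` subclass, and `IPAString` comes from `ipapy.ipastring`; the example string and its ARPABET rendering are illustrative:

from ipapy.arpabetmapper import ARPABETMapper
from ipapy.ipastring import IPAString

mapper = ARPABETMapper()
s = IPAString(unicode_string=u"mæn")
if mapper.can_map_ipa_string(s):
    print(mapper.map_ipa_string(s))  # e.g. u"MAEN"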
pettarin/ipapy
ipapy/mapper.py
Mapper.map_ipa_string
def map_ipa_string(self, ipa_string, ignore=False, return_as_list=False, return_can_map=False): """ Convert the given IPAString to a string containing the corresponding ASCII IPA representation. :param IPAString ipa_string: the IPAString to be parsed :param bool ignore: if ``True``, ignore Unicode characters that are not IPA valid :param bool return_as_list: if ``True``, return as a list of strings, one for each IPAChar, instead of their concatenation (single str) :param bool return_can_map: if ``True``, return a pair ``(bool, str)``, where the first element says if the mapper can map all the IPA characters in the given IPA string, and the second element is either ``None`` or the mapped string/list :rtype: str or (bool, str) or (bool, list) """ acc = [] can_map = True canonical = [(c.canonical_representation, ) for c in ipa_string] split = split_using_dictionary(canonical, self, self.max_key_length, single_char_parsing=False) for sub in split: try: acc.append(self.ipa_canonical_representation_to_mapped_str[sub]) except KeyError: if ignore: can_map = False else: raise ValueError("The IPA string contains an IPA character that is not mapped: %s" % sub) mapped = acc if return_as_list else u"".join(acc) if return_can_map: return (can_map, mapped) return mapped
python
def map_ipa_string(self, ipa_string, ignore=False, return_as_list=False, return_can_map=False): """ Convert the given IPAString to a string containing the corresponding ASCII IPA representation. :param IPAString ipa_string: the IPAString to be parsed :param bool ignore: if ``True``, ignore Unicode characters that are not IPA valid :param bool return_as_list: if ``True``, return as a list of strings, one for each IPAChar, instead of their concatenation (single str) :param bool return_can_map: if ``True``, return a pair ``(bool, str)``, where the first element says if the mapper can map all the IPA characters in the given IPA string, and the second element is either ``None`` or the mapped string/list :rtype: str or (bool, str) or (bool, list) """ acc = [] can_map = True canonical = [(c.canonical_representation, ) for c in ipa_string] split = split_using_dictionary(canonical, self, self.max_key_length, single_char_parsing=False) for sub in split: try: acc.append(self.ipa_canonical_representation_to_mapped_str[sub]) except KeyError: if ignore: can_map = False else: raise ValueError("The IPA string contains an IPA character that is not mapped: %s" % sub) mapped = acc if return_as_list else u"".join(acc) if return_can_map: return (can_map, mapped) return mapped
[ "def", "map_ipa_string", "(", "self", ",", "ipa_string", ",", "ignore", "=", "False", ",", "return_as_list", "=", "False", ",", "return_can_map", "=", "False", ")", ":", "acc", "=", "[", "]", "can_map", "=", "True", "canonical", "=", "[", "(", "c", ".", "canonical_representation", ",", ")", "for", "c", "in", "ipa_string", "]", "split", "=", "split_using_dictionary", "(", "canonical", ",", "self", ",", "self", ".", "max_key_length", ",", "single_char_parsing", "=", "False", ")", "for", "sub", "in", "split", ":", "try", ":", "acc", ".", "append", "(", "self", ".", "ipa_canonical_representation_to_mapped_str", "[", "sub", "]", ")", "except", "KeyError", ":", "if", "ignore", ":", "can_map", "=", "False", "else", ":", "raise", "ValueError", "(", "\"The IPA string contains an IPA character that is not mapped: %s\"", "%", "sub", ")", "mapped", "=", "acc", "if", "return_as_list", "else", "u\"\"", ".", "join", "(", "acc", ")", "if", "return_can_map", ":", "return", "(", "can_map", ",", "mapped", ")", "return", "mapped" ]
Convert the given IPAString to a string containing the corresponding ASCII IPA representation. :param IPAString ipa_string: the IPAString to be parsed :param bool ignore: if ``True``, ignore Unicode characters that are not IPA valid :param bool return_as_list: if ``True``, return as a list of strings, one for each IPAChar, instead of their concatenation (single str) :param bool return_can_map: if ``True``, return a pair ``(bool, str)``, where the first element says if the mapper can map all the IPA characters in the given IPA string, and the second element is either ``None`` or the mapped string/list :rtype: str or (bool, str) or (bool, list)
[ "Convert", "the", "given", "IPAString", "to", "a", "string", "containing", "the", "corresponding", "ASCII", "IPA", "representation", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/mapper.py#L108-L137
pettarin/ipapy
ipapy/mapper.py
Mapper.map_unicode_string
def map_unicode_string(self, unicode_string, ignore=False, single_char_parsing=False, return_as_list=False, return_can_map=False): """ Convert the given Unicode string, representing an IPA string, to a string containing the corresponding mapped representation. Return ``None`` if ``unicode_string`` is ``None``. :param str unicode_string: the Unicode string to be parsed :param bool ignore: if ``True``, ignore Unicode characters that are not IPA valid :param bool single_char_parsing: if ``True``, parse one Unicode character at a time :param bool return_as_list: if ``True``, return as a list of strings, one for each IPAChar, instead of their concatenation (single str) :param bool return_can_map: if ``True``, return a pair ``(bool, str)``, where the first element says if the mapper can map all the IPA characters in the given IPA string, and the second element is either ``None`` or the mapped string/list :rtype: str or (bool, str) or (bool, list) """ if unicode_string is None: return None ipa_string = IPAString(unicode_string=unicode_string, ignore=ignore, single_char_parsing=single_char_parsing) return self.map_ipa_string( ipa_string=ipa_string, ignore=ignore, return_as_list=return_as_list, return_can_map=return_can_map )
python
def map_unicode_string(self, unicode_string, ignore=False, single_char_parsing=False, return_as_list=False, return_can_map=False): """ Convert the given Unicode string, representing an IPA string, to a string containing the corresponding mapped representation. Return ``None`` if ``unicode_string`` is ``None``. :param str unicode_string: the Unicode string to be parsed :param bool ignore: if ``True``, ignore Unicode characters that are not IPA valid :param bool single_char_parsing: if ``True``, parse one Unicode character at a time :param bool return_as_list: if ``True``, return as a list of strings, one for each IPAChar, instead of their concatenation (single str) :param bool return_can_map: if ``True``, return a pair ``(bool, str)``, where the first element says if the mapper can map all the IPA characters in the given IPA string, and the second element is either ``None`` or the mapped string/list :rtype: str or (bool, str) or (bool, list) """ if unicode_string is None: return None ipa_string = IPAString(unicode_string=unicode_string, ignore=ignore, single_char_parsing=single_char_parsing) return self.map_ipa_string( ipa_string=ipa_string, ignore=ignore, return_as_list=return_as_list, return_can_map=return_can_map )
[ "def", "map_unicode_string", "(", "self", ",", "unicode_string", ",", "ignore", "=", "False", ",", "single_char_parsing", "=", "False", ",", "return_as_list", "=", "False", ",", "return_can_map", "=", "False", ")", ":", "if", "unicode_string", "is", "None", ":", "return", "None", "ipa_string", "=", "IPAString", "(", "unicode_string", "=", "unicode_string", ",", "ignore", "=", "ignore", ",", "single_char_parsing", "=", "single_char_parsing", ")", "return", "self", ".", "map_ipa_string", "(", "ipa_string", "=", "ipa_string", ",", "ignore", "=", "ignore", ",", "return_as_list", "=", "return_as_list", ",", "return_can_map", "=", "return_can_map", ")" ]
Convert the given Unicode string, representing an IPA string, to a string containing the corresponding mapped representation. Return ``None`` if ``unicode_string`` is ``None``. :param str unicode_string: the Unicode string to be parsed :param bool ignore: if ``True``, ignore Unicode characters that are not IPA valid :param bool single_char_parsing: if ``True``, parse one Unicode character at a time :param bool return_as_list: if ``True``, return as a list of strings, one for each IPAChar, instead of their concatenation (single str) :param bool return_can_map: if ``True``, return a pair ``(bool, str)``, where the first element says if the mapper can map all the IPA characters in the given IPA string, and the second element is either ``None`` or the mapped string/list :rtype: str or (bool, str) or (bool, list)
[ "Convert", "the", "given", "Unicode", "string", "representing", "an", "IPA", "string", "to", "a", "string", "containing", "the", "corresponding", "mapped", "representation", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/mapper.py#L139-L164
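`map_unicode_string` is the convenience wrapper most callers want, since it builds the `IPAString` itself. Continuing with the same hypothetical mapper:

from ipapy.arpabetmapper import ARPABETMapper

mapper = ARPABETMapper()
print(mapper.map_unicode_string(u"mæn", return_as_list=True))  # e.g. [u'M', u'AE', u'N']
print(mapper.map_unicode_string(u"mæn!", ignore=True))         # '!' is dropped, not an error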
pettarin/ipapy
ipapy/__main__.py
print_invalid_chars
def print_invalid_chars(invalid_chars, vargs): """ Print Unicode characters that are not IPA valid, if requested by the user. :param list invalid_chars: a list (possibly empty) of invalid Unicode characters :param dict vargs: the command line parameters """ if len(invalid_chars) > 0: if vargs["print_invalid"]: print(u"".join(invalid_chars)) if vargs["unicode"]: for u_char in sorted(set(invalid_chars)): print(u"'%s'\t%s\t%s" % (u_char, hex(ord(u_char)), unicodedata.name(u_char, "UNKNOWN")))
python
def print_invalid_chars(invalid_chars, vargs): """ Print Unicode characters that are not IPA valid, if requested by the user. :param list invalid_chars: a list (possibly empty) of invalid Unicode characters :param dict vargs: the command line parameters """ if len(invalid_chars) > 0: if vargs["print_invalid"]: print(u"".join(invalid_chars)) if vargs["unicode"]: for u_char in sorted(set(invalid_chars)): print(u"'%s'\t%s\t%s" % (u_char, hex(ord(u_char)), unicodedata.name(u_char, "UNKNOWN")))
[ "def", "print_invalid_chars", "(", "invalid_chars", ",", "vargs", ")", ":", "if", "len", "(", "invalid_chars", ")", ">", "0", ":", "if", "vargs", "[", "\"print_invalid\"", "]", ":", "print", "(", "u\"\"", ".", "join", "(", "invalid_chars", ")", ")", "if", "vargs", "[", "\"unicode\"", "]", ":", "for", "u_char", "in", "sorted", "(", "set", "(", "invalid_chars", ")", ")", ":", "print", "(", "u\"'%s'\\t%s\\t%s\"", "%", "(", "u_char", ",", "hex", "(", "ord", "(", "u_char", ")", ")", ",", "unicodedata", ".", "name", "(", "u_char", ",", "\"UNKNOWN\"", ")", ")", ")" ]
Print Unicode characters that are not IPA valid, if requested by the user. :param list invalid_chars: a list (possibly empty) of invalid Unicode characters :param dict vargs: the command line parameters
[ "Print", "Unicode", "characterss", "that", "are", "not", "IPA", "valid", "if", "requested", "by", "the", "user", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/__main__.py#L91-L104
pettarin/ipapy
ipapy/__main__.py
command_canonize
def command_canonize(string, vargs): """ Print the canonical representation of the given string. It will replace non-canonical compound characters with their canonical synonym. :param str string: the string to act upon :param dict vargs: the command line arguments """ try: ipa_string = IPAString( unicode_string=string, ignore=vargs["ignore"], single_char_parsing=vargs["single_char_parsing"] ) print(vargs["separator"].join([(u"%s" % c) for c in ipa_string])) except ValueError as exc: print_error(str(exc))
python
def command_canonize(string, vargs): """ Print the canonical representation of the given string. It will replace non-canonical compound characters with their canonical synonym. :param str string: the string to act upon :param dict vargs: the command line arguments """ try: ipa_string = IPAString( unicode_string=string, ignore=vargs["ignore"], single_char_parsing=vargs["single_char_parsing"] ) print(vargs["separator"].join([(u"%s" % c) for c in ipa_string])) except ValueError as exc: print_error(str(exc))
[ "def", "command_canonize", "(", "string", ",", "vargs", ")", ":", "try", ":", "ipa_string", "=", "IPAString", "(", "unicode_string", "=", "string", ",", "ignore", "=", "vargs", "[", "\"ignore\"", "]", ",", "single_char_parsing", "=", "vargs", "[", "\"single_char_parsing\"", "]", ")", "print", "(", "vargs", "[", "\"separator\"", "]", ".", "join", "(", "[", "(", "u\"%s\"", "%", "c", ")", "for", "c", "in", "ipa_string", "]", ")", ")", "except", "ValueError", "as", "exc", ":", "print_error", "(", "str", "(", "exc", ")", ")" ]
Print the canonical representation of the given string. It will replace non-canonical compound characters with their canonical synonym. :param str string: the string to act upon :param dict vargs: the command line arguments
[ "Print", "the", "canonical", "representation", "of", "the", "given", "string", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/__main__.py#L106-L124
pettarin/ipapy
ipapy/__main__.py
command_chars
def command_chars(string, vargs): """ Print a list of all IPA characters in the given string. It will print the Unicode representation, the full IPA name, and the Unicode "U+"-prefixed hexadecimal codepoint representation of each IPA character. :param str string: the string to act upon :param dict vargs: the command line arguments """ try: ipa_string = IPAString( unicode_string=string, ignore=vargs["ignore"], single_char_parsing=vargs["single_char_parsing"] ) for c in ipa_string: print(u"'%s'\t%s (%s)" % (c.unicode_repr, c.name, unicode_to_hex(c.unicode_repr))) except ValueError as exc: print_error(str(exc))
python
def command_chars(string, vargs): """ Print a list of all IPA characters in the given string. It will print the Unicode representation, the full IPA name, and the Unicode "U+"-prefixed hexadecimal codepoint representation of each IPA character. :param str string: the string to act upon :param dict vargs: the command line arguments """ try: ipa_string = IPAString( unicode_string=string, ignore=vargs["ignore"], single_char_parsing=vargs["single_char_parsing"] ) for c in ipa_string: print(u"'%s'\t%s (%s)" % (c.unicode_repr, c.name, unicode_to_hex(c.unicode_repr))) except ValueError as exc: print_error(str(exc))
[ "def", "command_chars", "(", "string", ",", "vargs", ")", ":", "try", ":", "ipa_string", "=", "IPAString", "(", "unicode_string", "=", "string", ",", "ignore", "=", "vargs", "[", "\"ignore\"", "]", ",", "single_char_parsing", "=", "vargs", "[", "\"single_char_parsing\"", "]", ")", "for", "c", "in", "ipa_string", ":", "print", "(", "u\"'%s'\\t%s (%s)\"", "%", "(", "c", ".", "unicode_repr", ",", "c", ".", "name", ",", "unicode_to_hex", "(", "c", ".", "unicode_repr", ")", ")", ")", "except", "ValueError", "as", "exc", ":", "print_error", "(", "str", "(", "exc", ")", ")" ]
Print a list of all IPA characters in the given string. It will print the Unicode representation, the full IPA name, and the Unicode "U+"-prefixed hexadecimal codepoint representation of each IPA character. :param str string: the string to act upon :param dict vargs: the command line arguments
[ "Print", "a", "list", "of", "all", "IPA", "characters", "in", "the", "given", "string", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/__main__.py#L126-L146
pettarin/ipapy
ipapy/__main__.py
command_check
def command_check(string, vargs): """ Check if the given string is IPA valid. If the given string is not IPA valid, print the invalid characters. :param str string: the string to act upon :param dict vargs: the command line arguments """ is_valid = is_valid_ipa(string) print(is_valid) if not is_valid: valid_chars, invalid_chars = remove_invalid_ipa_characters( unicode_string=string, return_invalid=True ) print_invalid_chars(invalid_chars, vargs)
python
def command_check(string, vargs): """ Check if the given string is IPA valid. If the given string is not IPA valid, print the invalid characters. :param str string: the string to act upon :param dict vargs: the command line arguments """ is_valid = is_valid_ipa(string) print(is_valid) if not is_valid: valid_chars, invalid_chars = remove_invalid_ipa_characters( unicode_string=string, return_invalid=True ) print_invalid_chars(invalid_chars, vargs)
[ "def", "command_check", "(", "string", ",", "vargs", ")", ":", "is_valid", "=", "is_valid_ipa", "(", "string", ")", "print", "(", "is_valid", ")", "if", "not", "is_valid", ":", "valid_chars", ",", "invalid_chars", "=", "remove_invalid_ipa_characters", "(", "unicode_string", "=", "string", ",", "return_invalid", "=", "True", ")", "print_invalid_chars", "(", "invalid_chars", ",", "vargs", ")" ]
Check if the given string is IPA valid. If the given string is not IPA valid, print the invalid characters. :param str string: the string to act upon :param dict vargs: the command line arguments
[ "Check", "if", "the", "given", "string", "is", "IPA", "valid", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/__main__.py#L148-L165
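command_check delegates the actual validation to is_valid_ipa, which its unqualified use in ipapy/__main__.py suggests is importable from the top-level ipapy package; a sketch of calling that helper directly:

```python
from ipapy import is_valid_ipa  # import path assumed from the module's usage

print(is_valid_ipa(u"at\u0361\u0283a"))  # True: every character is IPA valid
print(is_valid_ipa(u"a9a"))              # False: '9' is not an IPA character
```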
pettarin/ipapy
ipapy/__main__.py
command_clean
def command_clean(string, vargs): """ Remove characters that are not IPA valid from the given string, and print the remaining string. :param str string: the string to act upon :param dict vargs: the command line arguments """ valid_chars, invalid_chars = remove_invalid_ipa_characters( unicode_string=string, return_invalid=True, single_char_parsing=vargs["single_char_parsing"] ) print(u"".join(valid_chars)) print_invalid_chars(invalid_chars, vargs)
python
def command_clean(string, vargs): """ Remove characters that are not IPA valid from the given string, and print the remaining string. :param str string: the string to act upon :param dict vargs: the command line arguments """ valid_chars, invalid_chars = remove_invalid_ipa_characters( unicode_string=string, return_invalid=True, single_char_parsing=vargs["single_char_parsing"] ) print(u"".join(valid_chars)) print_invalid_chars(invalid_chars, vargs)
[ "def", "command_clean", "(", "string", ",", "vargs", ")", ":", "valid_chars", ",", "invalid_chars", "=", "remove_invalid_ipa_characters", "(", "unicode_string", "=", "string", ",", "return_invalid", "=", "True", ",", "single_char_parsing", "=", "vargs", "[", "\"single_char_parsing\"", "]", ")", "print", "(", "u\"\"", ".", "join", "(", "valid_chars", ")", ")", "print_invalid_chars", "(", "invalid_chars", ",", "vargs", ")" ]
Remove characters that are not IPA valid from the given string, and print the remaining string. :param str string: the string to act upon :param dict vargs: the command line arguments
[ "Remove", "characters", "that", "are", "not", "IPA", "valid", "from", "the", "given", "string", "and", "print", "the", "remaining", "string", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/__main__.py#L167-L181
pettarin/ipapy
ipapy/__main__.py
command_u2a
def command_u2a(string, vargs): """ Print the ARPABET ASCII string corresponding to the given Unicode IPA string. :param str string: the string to act upon :param dict vargs: the command line arguments """ try: l = ARPABETMapper().map_unicode_string( unicode_string=string, ignore=vargs["ignore"], single_char_parsing=vargs["single_char_parsing"], return_as_list=True ) print(vargs["separator"].join(l)) except ValueError as exc: print_error(str(exc))
python
def command_u2a(string, vargs): """ Print the ARPABET ASCII string corresponding to the given Unicode IPA string. :param str string: the string to act upon :param dict vargs: the command line arguments """ try: l = ARPABETMapper().map_unicode_string( unicode_string=string, ignore=vargs["ignore"], single_char_parsing=vargs["single_char_parsing"], return_as_list=True ) print(vargs["separator"].join(l)) except ValueError as exc: print_error(str(exc))
[ "def", "command_u2a", "(", "string", ",", "vargs", ")", ":", "try", ":", "l", "=", "ARPABETMapper", "(", ")", ".", "map_unicode_string", "(", "unicode_string", "=", "string", ",", "ignore", "=", "vargs", "[", "\"ignore\"", "]", ",", "single_char_parsing", "=", "vargs", "[", "\"single_char_parsing\"", "]", ",", "return_as_list", "=", "True", ")", "print", "(", "vargs", "[", "\"separator\"", "]", ".", "join", "(", "l", ")", ")", "except", "ValueError", "as", "exc", ":", "print_error", "(", "str", "(", "exc", ")", ")" ]
Print the ARPABET ASCII string corresponding to the given Unicode IPA string. :param str string: the string to act upon :param dict vargs: the command line arguments
[ "Print", "the", "ARPABEY", "ASCII", "string", "corresponding", "to", "the", "given", "Unicode", "IPA", "string", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/__main__.py#L183-L199
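The wrapper above is a thin layer over ARPABETMapper; a sketch of the underlying call, where the class name and keyword arguments come from the record itself and the module path ipapy.arpabetmapper is an assumption:

```python
from ipapy.arpabetmapper import ARPABETMapper  # module path assumed

mapper = ARPABETMapper()
# Same keyword arguments the command wrapper forwards; return_as_list=True
# yields one ASCII symbol per mapped IPA character.
symbols = mapper.map_unicode_string(
    unicode_string=u"piz\u0259",  # 'pizə'
    ignore=True,                  # drop characters that are not IPA valid
    single_char_parsing=False,
    return_as_list=True,
)
print(u" ".join(symbols))
```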
pettarin/ipapy
ipapy/__main__.py
command_u2k
def command_u2k(string, vargs): """ Print the Kirshenbaum ASCII string corresponding to the given Unicode IPA string. :param str string: the string to act upon :param dict vargs: the command line arguments """ try: l = KirshenbaumMapper().map_unicode_string( unicode_string=string, ignore=vargs["ignore"], single_char_parsing=vargs["single_char_parsing"], return_as_list=True ) print(vargs["separator"].join(l)) except ValueError as exc: print_error(str(exc))
python
def command_u2k(string, vargs): """ Print the Kirshenbaum ASCII string corresponding to the given Unicode IPA string. :param str string: the string to act upon :param dict vargs: the command line arguments """ try: l = KirshenbaumMapper().map_unicode_string( unicode_string=string, ignore=vargs["ignore"], single_char_parsing=vargs["single_char_parsing"], return_as_list=True ) print(vargs["separator"].join(l)) except ValueError as exc: print_error(str(exc))
[ "def", "command_u2k", "(", "string", ",", "vargs", ")", ":", "try", ":", "l", "=", "KirshenbaumMapper", "(", ")", ".", "map_unicode_string", "(", "unicode_string", "=", "string", ",", "ignore", "=", "vargs", "[", "\"ignore\"", "]", ",", "single_char_parsing", "=", "vargs", "[", "\"single_char_parsing\"", "]", ",", "return_as_list", "=", "True", ")", "print", "(", "vargs", "[", "\"separator\"", "]", ".", "join", "(", "l", ")", ")", "except", "ValueError", "as", "exc", ":", "print_error", "(", "str", "(", "exc", ")", ")" ]
Print the Kirshenbaum ASCII string corresponding to the given Unicode IPA string. :param str string: the string to act upon :param dict vargs: the command line arguments
[ "Print", "the", "Kirshenbaum", "ASCII", "string", "corresponding", "to", "the", "given", "Unicode", "IPA", "string", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/__main__.py#L201-L217
pettarin/ipapy
ipapy/__main__.py
main
def main(): """ Entry point. """ parser = argparse.ArgumentParser(description=DESCRIPTION) for arg in ARGUMENTS: if "action" in arg: if arg["short"] is not None: parser.add_argument(arg["short"], arg["long"], action=arg["action"], help=arg["help"]) else: parser.add_argument(arg["long"], action=arg["action"], help=arg["help"]) else: if arg["short"] is not None: parser.add_argument(arg["short"], arg["long"], nargs=arg["nargs"], type=arg["type"], default=arg["default"], help=arg["help"]) else: parser.add_argument(arg["long"], nargs=arg["nargs"], type=arg["type"], default=arg["default"], help=arg["help"]) vargs = vars(parser.parse_args()) command = vargs["command"] string = to_unicode_string(vargs["string"]) if command not in COMMAND_MAP: parser.print_help() sys.exit(2) COMMAND_MAP[command](string, vargs) sys.exit(0)
python
def main(): """ Entry point. """ parser = argparse.ArgumentParser(description=DESCRIPTION) for arg in ARGUMENTS: if "action" in arg: if arg["short"] is not None: parser.add_argument(arg["short"], arg["long"], action=arg["action"], help=arg["help"]) else: parser.add_argument(arg["long"], action=arg["action"], help=arg["help"]) else: if arg["short"] is not None: parser.add_argument(arg["short"], arg["long"], nargs=arg["nargs"], type=arg["type"], default=arg["default"], help=arg["help"]) else: parser.add_argument(arg["long"], nargs=arg["nargs"], type=arg["type"], default=arg["default"], help=arg["help"]) vargs = vars(parser.parse_args()) command = vargs["command"] string = to_unicode_string(vargs["string"]) if command not in COMMAND_MAP: parser.print_help() sys.exit(2) COMMAND_MAP[command](string, vargs) sys.exit(0)
[ "def", "main", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "DESCRIPTION", ")", "for", "arg", "in", "ARGUMENTS", ":", "if", "\"action\"", "in", "arg", ":", "if", "arg", "[", "\"short\"", "]", "is", "not", "None", ":", "parser", ".", "add_argument", "(", "arg", "[", "\"short\"", "]", ",", "arg", "[", "\"long\"", "]", ",", "action", "=", "arg", "[", "\"action\"", "]", ",", "help", "=", "arg", "[", "\"help\"", "]", ")", "else", ":", "parser", ".", "add_argument", "(", "arg", "[", "\"long\"", "]", ",", "action", "=", "arg", "[", "\"action\"", "]", ",", "help", "=", "arg", "[", "\"help\"", "]", ")", "else", ":", "if", "arg", "[", "\"short\"", "]", "is", "not", "None", ":", "parser", ".", "add_argument", "(", "arg", "[", "\"short\"", "]", ",", "arg", "[", "\"long\"", "]", ",", "nargs", "=", "arg", "[", "\"nargs\"", "]", ",", "type", "=", "arg", "[", "\"type\"", "]", ",", "default", "=", "arg", "[", "\"default\"", "]", ",", "help", "=", "arg", "[", "\"help\"", "]", ")", "else", ":", "parser", ".", "add_argument", "(", "arg", "[", "\"long\"", "]", ",", "nargs", "=", "arg", "[", "\"nargs\"", "]", ",", "type", "=", "arg", "[", "\"type\"", "]", ",", "default", "=", "arg", "[", "\"default\"", "]", ",", "help", "=", "arg", "[", "\"help\"", "]", ")", "vargs", "=", "vars", "(", "parser", ".", "parse_args", "(", ")", ")", "command", "=", "vargs", "[", "\"command\"", "]", "string", "=", "to_unicode_string", "(", "vargs", "[", "\"string\"", "]", ")", "if", "command", "not", "in", "COMMAND_MAP", ":", "parser", ".", "print_help", "(", ")", "sys", ".", "exit", "(", "2", ")", "COMMAND_MAP", "[", "command", "]", "(", "string", ",", "vargs", ")", "sys", ".", "exit", "(", "0", ")" ]
Entry point.
[ "Entry", "point", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/__main__.py#L228-L251
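Because the parser and COMMAND_MAP live in ipapy/__main__.py, the commands above should be reachable with python -m; a sketch launched from Python, where the 'canonize' command name is taken from the function names above but the exact positional layout of the CLI is an assumption:

```python
import subprocess
import sys

# Invoke the package CLI in a child process: assumed positional command + string.
subprocess.run([sys.executable, "-m", "ipapy", "canonize", u"at\u0361\u0283a"])
```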
pettarin/ipapy
ipapy/ipastring.py
IPAString.ipa_chars
def ipa_chars(self, value): """ Set the list of IPAChar objects composing the IPA string :param list value: list of IPAChar objects """ if value is None: self.__ipa_chars = [] else: if is_list_of_ipachars(value): self.__ipa_chars = value else: raise TypeError("ipa_chars only accepts a list of IPAChar objects")
python
def ipa_chars(self, value): """ Set the list of IPAChar objects composing the IPA string :param list value: list of IPAChar objects """ if value is None: self.__ipa_chars = [] else: if is_list_of_ipachars(value): self.__ipa_chars = value else: raise TypeError("ipa_chars only accepts a list of IPAChar objects")
[ "def", "ipa_chars", "(", "self", ",", "value", ")", ":", "if", "value", "is", "None", ":", "self", ".", "__ipa_chars", "=", "[", "]", "else", ":", "if", "is_list_of_ipachars", "(", "value", ")", ":", "self", ".", "__ipa_chars", "=", "value", "else", ":", "raise", "TypeError", "(", "\"ipa_chars only accepts a list of IPAChar objects\"", ")" ]
Set the list of IPAChar objects composing the IPA string :param list value: list of IPAChar objects
[ "Set", "the", "list", "of", "IPAChar", "objects", "composing", "the", "IPA", "string" ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/ipastring.py#L111-L123
pettarin/ipapy
ipapy/ipastring.py
IPAString.is_equivalent
def is_equivalent(self, other, ignore=False): """ Return ``True`` if the IPA string is equivalent to the ``other`` object. The ``other`` object can be: 1. a Unicode string, 2. a list of IPAChar objects, and 3. another IPAString. :param variant other: the object to be compared against :param bool ignore: if other is a Unicode string, ignore Unicode characters not IPA valid :rtype: bool """ def is_equivalent_to_list_of_ipachars(other): """ Return ``True`` if the list of IPAChar objects in the canonical representation of the string is the same as the given list. :param list other: list of IPAChar objects :rtype: bool """ my_ipa_chars = self.canonical_representation.ipa_chars if len(my_ipa_chars) != len(other): return False for i in range(len(my_ipa_chars)): if not my_ipa_chars[i].is_equivalent(other[i]): return False return True if is_unicode_string(other): try: return is_equivalent_to_list_of_ipachars(IPAString(unicode_string=other, ignore=ignore).ipa_chars) except: return False if is_list_of_ipachars(other): try: return is_equivalent_to_list_of_ipachars(other) except: return False if isinstance(other, IPAString): return is_equivalent_to_list_of_ipachars(other.canonical_representation.ipa_chars) return False
python
def is_equivalent(self, other, ignore=False): """ Return ``True`` if the IPA string is equivalent to the ``other`` object. The ``other`` object can be: 1. a Unicode string, 2. a list of IPAChar objects, and 3. another IPAString. :param variant other: the object to be compared against :param bool ignore: if other is a Unicode string, ignore Unicode characters not IPA valid :rtype: bool """ def is_equivalent_to_list_of_ipachars(other): """ Return ``True`` if the list of IPAChar objects in the canonical representation of the string is the same as the given list. :param list other: list of IPAChar objects :rtype: bool """ my_ipa_chars = self.canonical_representation.ipa_chars if len(my_ipa_chars) != len(other): return False for i in range(len(my_ipa_chars)): if not my_ipa_chars[i].is_equivalent(other[i]): return False return True if is_unicode_string(other): try: return is_equivalent_to_list_of_ipachars(IPAString(unicode_string=other, ignore=ignore).ipa_chars) except: return False if is_list_of_ipachars(other): try: return is_equivalent_to_list_of_ipachars(other) except: return False if isinstance(other, IPAString): return is_equivalent_to_list_of_ipachars(other.canonical_representation.ipa_chars) return False
[ "def", "is_equivalent", "(", "self", ",", "other", ",", "ignore", "=", "False", ")", ":", "def", "is_equivalent_to_list_of_ipachars", "(", "other", ")", ":", "\"\"\"\n Return ``True`` if the list of IPAChar objects\n in the canonical representation of the string\n is the same as the given list.\n\n :param list other: list of IPAChar objects\n :rtype: bool\n \"\"\"", "my_ipa_chars", "=", "self", ".", "canonical_representation", ".", "ipa_chars", "if", "len", "(", "my_ipa_chars", ")", "!=", "len", "(", "other", ")", ":", "return", "False", "for", "i", "in", "range", "(", "len", "(", "my_ipa_chars", ")", ")", ":", "if", "not", "my_ipa_chars", "[", "i", "]", ".", "is_equivalent", "(", "other", "[", "i", "]", ")", ":", "return", "False", "return", "True", "if", "is_unicode_string", "(", "other", ")", ":", "try", ":", "return", "is_equivalent_to_list_of_ipachars", "(", "IPAString", "(", "unicode_string", "=", "other", ",", "ignore", "=", "ignore", ")", ".", "ipa_chars", ")", "except", ":", "return", "False", "if", "is_list_of_ipachars", "(", "other", ")", ":", "try", ":", "return", "is_equivalent_to_list_of_ipachars", "(", "other", ")", "except", ":", "return", "False", "if", "isinstance", "(", "other", ",", "IPAString", ")", ":", "return", "is_equivalent_to_list_of_ipachars", "(", "other", ".", "canonical_representation", ".", "ipa_chars", ")", "return", "False" ]
Return ``True`` if the IPA string is equivalent to the ``other`` object. The ``other`` object can be: 1. a Unicode string, 2. a list of IPAChar objects, and 3. another IPAString. :param variant other: the object to be compared against :param bool ignore: if other is a Unicode string, ignore Unicode characters not IPA valid :rtype: bool
[ "Return", "True", "if", "the", "IPA", "string", "is", "equivalent", "to", "the", "other", "object", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/ipastring.py#L125-L168
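A sketch exercising two of the three operand types the docstring lists; the IPA strings are arbitrary valid examples:

```python
from ipapy.ipastring import IPAString

s = IPAString(unicode_string=u"\u02c8ak\u0251")  # 'ˈakɑ'
print(s.is_equivalent(u"\u02c8ak\u0251"))  # True: same Unicode string
print(s.is_equivalent(s.ipa_chars))        # True: same list of IPAChar objects
print(s.is_equivalent(u"ba"))              # False
```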
pettarin/ipapy
ipapy/ipastring.py
IPAString.canonical_representation
def canonical_representation(self): """ Return a new IPAString, containing the canonical representation of the current string, that is, the one composed by the (prefix) minimum number of IPAChar objects. :rtype: IPAString """ return IPAString(unicode_string=u"".join([c.__unicode__() for c in self.ipa_chars]))
python
def canonical_representation(self): """ Return a new IPAString, containing the canonical representation of the current string, that is, the one composed by the (prefix) minimum number of IPAChar objects. :rtype: IPAString """ return IPAString(unicode_string=u"".join([c.__unicode__() for c in self.ipa_chars]))
[ "def", "canonical_representation", "(", "self", ")", ":", "return", "IPAString", "(", "unicode_string", "=", "u\"\"", ".", "join", "(", "[", "c", ".", "__unicode__", "(", ")", "for", "c", "in", "self", ".", "ipa_chars", "]", ")", ")" ]
Return a new IPAString, containing the canonical representation of the current string, that is, the one composed by the (prefix) minimum number of IPAChar objects. :rtype: IPAString
[ "Return", "a", "new", "IPAString", "containing", "the", "canonical", "representation", "of", "the", "current", "string", "that", "is", "the", "one", "composed", "by", "the", "(", "prefix", ")", "minimum", "number", "of", "IPAChar", "objects", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/ipastring.py#L171-L178
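Since the property just re-parses the string's own Unicode form, the result is always equivalent to the original; a small sketch:

```python
from ipapy.ipastring import IPAString

s = IPAString(unicode_string=u"at\u0361\u0283a")  # 'at͡ʃa'
canon = s.canonical_representation
print(s.is_equivalent(canon))  # True by construction
print(len(canon.ipa_chars))    # IPAChar count after greedy re-parsing
```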
pettarin/ipapy
ipapy/ipastring.py
IPAString.filter_chars
def filter_chars(self, chars=u""): """ Return a new IPAString, containing only the IPA characters specified by the ``chars`` string. Valid values for ``chars`` are: * ``consonants`` or ``cns`` * ``vowels`` or ``vwl`` * ``letters`` or ``cns_vwl`` * ``cns_vwl_pstr`` or ``cvp`` * ``cns_vwl_pstr_long`` or ``cvpl`` * ``cns_vwl_str`` or ``cvs`` * ``cns_vwl_str_len`` or ``cvsl`` * ``cns_vwl_str_len_wb`` or ``cvslw`` * ``cns_vwl_str_len_wb_sb`` or ``cvslws`` :rtype: IPAString """ if chars in [u"cns", u"consonants"]: return self.consonants elif chars in [u"vwl", u"vowels"]: return self.vowels elif chars in [u"cns_vwl", u"letters"]: return self.letters elif chars in [u"cns_vwl_pstr", u"cvp"]: return self.cns_vwl_pstr elif chars in [u"cns_vwl_pstr_long", u"cvpl"]: return self.cns_vwl_pstr_long elif chars in [u"cns_vwl_str", u"cvs"]: return self.cns_vwl_str elif chars in [u"cns_vwl_str_len", u"cvsl"]: return self.cns_vwl_str_len elif chars in [u"cns_vwl_str_len_wb", u"cvslw"]: return self.cns_vwl_str_len_wb elif chars in [u"cns_vwl_str_len_wb_sb", u"cvslws"]: return self.cns_vwl_str_len_wb_sb return self
python
def filter_chars(self, chars=u""): """ Return a new IPAString, containing only the IPA characters specified by the ``chars`` string. Valid values for ``chars`` are: * ``consonants`` or ``cns`` * ``vowels`` or ``vwl`` * ``letters`` or ``cns_vwl`` * ``cns_vwl_pstr`` or ``cvp`` * ``cns_vwl_pstr_long`` or ``cvpl`` * ``cns_vwl_str`` or ``cvs`` * ``cns_vwl_str_len`` or ``cvsl`` * ``cns_vwl_str_len_wb`` or ``cvslw`` * ``cns_vwl_str_len_wb_sb`` or ``cvslws`` :rtype: IPAString """ if chars in [u"cns", u"consonants"]: return self.consonants elif chars in [u"vwl", u"vowels"]: return self.vowels elif chars in [u"cns_vwl", u"letters"]: return self.letters elif chars in [u"cns_vwl_pstr", u"cvp"]: return self.cns_vwl_pstr elif chars in [u"cns_vwl_pstr_long", u"cvpl"]: return self.cns_vwl_pstr_long elif chars in [u"cns_vwl_str", u"cvs"]: return self.cns_vwl_str elif chars in [u"cns_vwl_str_len", u"cvsl"]: return self.cns_vwl_str_len elif chars in [u"cns_vwl_str_len_wb", u"cvslw"]: return self.cns_vwl_str_len_wb elif chars in [u"cns_vwl_str_len_wb_sb", u"cvslws"]: return self.cns_vwl_str_len_wb_sb return self
[ "def", "filter_chars", "(", "self", ",", "chars", "=", "u\"\"", ")", ":", "if", "chars", "in", "[", "u\"cns\"", ",", "u\"consonants\"", "]", ":", "return", "self", ".", "consonants", "elif", "chars", "in", "[", "u\"vwl\"", ",", "u\"vowels\"", "]", ":", "return", "self", ".", "vowels", "elif", "chars", "in", "[", "u\"cns_vwl\"", ",", "u\"letters\"", "]", ":", "return", "self", ".", "letters", "elif", "chars", "in", "[", "u\"cns_vwl_pstr\"", ",", "u\"cvp\"", "]", ":", "return", "self", ".", "cns_vwl_pstr", "elif", "chars", "in", "[", "u\"cns_vwl_pstr_long\"", ",", "u\"cvpl\"", "]", ":", "return", "self", ".", "cns_vwl_pstr_long", "elif", "chars", "in", "[", "u\"cns_vwl_str\"", ",", "u\"cvs\"", "]", ":", "return", "self", ".", "cns_vwl_str", "elif", "chars", "in", "[", "u\"cns_vwl_str_len\"", ",", "u\"cvsl\"", "]", ":", "return", "self", ".", "cns_vwl_str_len", "elif", "chars", "in", "[", "u\"cns_vwl_str_len_wb\"", ",", "u\"cvslw\"", "]", ":", "return", "self", ".", "cns_vwl_str_len_wb", "elif", "chars", "in", "[", "u\"cns_vwl_str_len_wb_sb\"", ",", "u\"cvslws\"", "]", ":", "return", "self", ".", "cns_vwl_str_len_wb_sb", "return", "self" ]
Return a new IPAString, containing only the IPA characters specified by the ``chars`` string. Valid values for ``chars`` are: * ``consonants`` or ``cns`` * ``vowels`` or ``vwl`` * ``letters`` or ``cns_vwl`` * ``cns_vwl_pstr`` or ``cvp`` * ``cns_vwl_pstr_long`` or ``cvpl`` * ``cns_vwl_str`` or ``cvs`` * ``cns_vwl_str_len`` or ``cvsl`` * ``cns_vwl_str_len_wb`` or ``cvslw`` * ``cns_vwl_str_len_wb_sb`` or ``cvslws`` :rtype: IPAString
[ "Return", "a", "new", "IPAString", "containing", "only", "the", "IPA", "characters", "specified", "by", "the", "chars", "string", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/ipastring.py#L180-L217
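A sketch showing that the long and short selector names are interchangeable, and that an unrecognized selector falls through to self; the sample string is an assumption:

```python
from ipapy.ipastring import IPAString

s = IPAString(unicode_string=u"\u02c8siks")  # 'ˈsiks'
print(s.filter_chars(u"consonants"))  # same result as u"cns"
print(s.filter_chars(u"vwl"))         # same result as u"vowels"
print(s.filter_chars(u"cvp"))         # letters plus primary-stress marks
print(s.filter_chars(u"no-such"))     # unknown selector: returns s itself
```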
pettarin/ipapy
ipapy/ipastring.py
IPAString.consonants
def consonants(self): """ Return a new IPAString, containing only the consonants in the current string. :rtype: IPAString """ return IPAString(ipa_chars=[c for c in self.ipa_chars if c.is_consonant])
python
def consonants(self): """ Return a new IPAString, containing only the consonants in the current string. :rtype: IPAString """ return IPAString(ipa_chars=[c for c in self.ipa_chars if c.is_consonant])
[ "def", "consonants", "(", "self", ")", ":", "return", "IPAString", "(", "ipa_chars", "=", "[", "c", "for", "c", "in", "self", ".", "ipa_chars", "if", "c", ".", "is_consonant", "]", ")" ]
Return a new IPAString, containing only the consonants in the current string. :rtype: IPAString
[ "Return", "a", "new", "IPAString", "containing", "only", "the", "consonants", "in", "the", "current", "string", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/ipastring.py#L220-L226
pettarin/ipapy
ipapy/ipastring.py
IPAString.vowels
def vowels(self): """ Return a new IPAString, containing only the vowels in the current string. :rtype: IPAString """ return IPAString(ipa_chars=[c for c in self.ipa_chars if c.is_vowel])
python
def vowels(self): """ Return a new IPAString, containing only the vowels in the current string. :rtype: IPAString """ return IPAString(ipa_chars=[c for c in self.ipa_chars if c.is_vowel])
[ "def", "vowels", "(", "self", ")", ":", "return", "IPAString", "(", "ipa_chars", "=", "[", "c", "for", "c", "in", "self", ".", "ipa_chars", "if", "c", ".", "is_vowel", "]", ")" ]
Return a new IPAString, containing only the vowels in the current string. :rtype: IPAString
[ "Return", "a", "new", "IPAString", "containing", "only", "the", "vowels", "in", "the", "current", "string", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/ipastring.py#L229-L235
pettarin/ipapy
ipapy/ipastring.py
IPAString.cns_vwl
def cns_vwl(self): """ Return a new IPAString, containing only: 1. the consonants, and 2. the vowels in the current string. :rtype: IPAString """ return IPAString(ipa_chars=[c for c in self.ipa_chars if c.is_letter])
python
def cns_vwl(self): """ Return a new IPAString, containing only: 1. the consonants, and 2. the vowels in the current string. :rtype: IPAString """ return IPAString(ipa_chars=[c for c in self.ipa_chars if c.is_letter])
[ "def", "cns_vwl", "(", "self", ")", ":", "return", "IPAString", "(", "ipa_chars", "=", "[", "c", "for", "c", "in", "self", ".", "ipa_chars", "if", "c", ".", "is_letter", "]", ")" ]
Return a new IPAString, containing only: 1. the consonants, and 2. the vowels in the current string. :rtype: IPAString
[ "Return", "a", "new", "IPAString", "containing", "only", ":", "1", ".", "the", "consonants", "and", "2", ".", "the", "vowels" ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/ipastring.py#L249-L260
pettarin/ipapy
ipapy/ipastring.py
IPAString.cns_vwl_pstr
def cns_vwl_pstr(self): """ Return a new IPAString, containing only: 1. the consonants, 2. the vowels, and 3. the primary stress diacritics in the current string. :rtype: IPAString """ return IPAString(ipa_chars=[c for c in self.ipa_chars if (c.is_letter) or (c.is_suprasegmental and c.is_primary_stress)])
python
def cns_vwl_pstr(self): """ Return a new IPAString, containing only: 1. the consonants, 2. the vowels, and 3. the primary stress diacritics in the current string. :rtype: IPAString """ return IPAString(ipa_chars=[c for c in self.ipa_chars if (c.is_letter) or (c.is_suprasegmental and c.is_primary_stress)])
[ "def", "cns_vwl_pstr", "(", "self", ")", ":", "return", "IPAString", "(", "ipa_chars", "=", "[", "c", "for", "c", "in", "self", ".", "ipa_chars", "if", "(", "c", ".", "is_letter", ")", "or", "(", "c", ".", "is_suprasegmental", "and", "c", ".", "is_primary_stress", ")", "]", ")" ]
Return a new IPAString, containing only: 1. the consonants, 2. the vowels, and 3. the primary stress diacritics in the current string. :rtype: IPAString
[ "Return", "a", "new", "IPAString", "containing", "only", ":", "1", ".", "the", "consonants", "2", ".", "the", "vowels", "and", "3", ".", "the", "primary", "stress", "diacritics" ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/ipastring.py#L263-L275
pettarin/ipapy
ipapy/ipastring.py
IPAString.cns_vwl_str
def cns_vwl_str(self): """ Return a new IPAString, containing only: 1. the consonants, 2. the vowels, and 3. the stress diacritics in the current string. :rtype: IPAString """ return IPAString(ipa_chars=[c for c in self.ipa_chars if (c.is_letter) or (c.is_suprasegmental and c.is_stress)])
python
def cns_vwl_str(self): """ Return a new IPAString, containing only: 1. the consonants, 2. the vowels, and 3. the stress diacritics in the current string. :rtype: IPAString """ return IPAString(ipa_chars=[c for c in self.ipa_chars if (c.is_letter) or (c.is_suprasegmental and c.is_stress)])
[ "def", "cns_vwl_str", "(", "self", ")", ":", "return", "IPAString", "(", "ipa_chars", "=", "[", "c", "for", "c", "in", "self", ".", "ipa_chars", "if", "(", "c", ".", "is_letter", ")", "or", "(", "c", ".", "is_suprasegmental", "and", "c", ".", "is_stress", ")", "]", ")" ]
Return a new IPAString, containing only: 1. the consonants, 2. the vowels, and 3. the stress diacritics in the current string. :rtype: IPAString
[ "Return", "a", "new", "IPAString", "containing", "only", ":", "1", ".", "the", "consonants", "2", ".", "the", "vowels", "and", "3", ".", "the", "stress", "diacritics" ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/ipastring.py#L278-L290
pettarin/ipapy
ipapy/ipastring.py
IPAString.cns_vwl_str_len
def cns_vwl_str_len(self): """ Return a new IPAString, containing only: 1. the consonants, 2. the vowels, and 3. the stress diacritics, and 4. the length diacritics in the current string. :rtype: IPAString """ return IPAString(ipa_chars=[c for c in self.ipa_chars if (c.is_letter) or (c.is_suprasegmental and (c.is_stress or c.is_length))])
python
def cns_vwl_str_len(self): """ Return a new IPAString, containing only: 1. the consonants, 2. the vowels, and 3. the stress diacritics, and 4. the length diacritics in the current string. :rtype: IPAString """ return IPAString(ipa_chars=[c for c in self.ipa_chars if (c.is_letter) or (c.is_suprasegmental and (c.is_stress or c.is_length))])
[ "def", "cns_vwl_str_len", "(", "self", ")", ":", "return", "IPAString", "(", "ipa_chars", "=", "[", "c", "for", "c", "in", "self", ".", "ipa_chars", "if", "(", "c", ".", "is_letter", ")", "or", "(", "c", ".", "is_suprasegmental", "and", "(", "c", ".", "is_stress", "or", "c", ".", "is_length", ")", ")", "]", ")" ]
Return a new IPAString, containing only: 1. the consonants, 2. the vowels, and 3. the stress diacritics, and 4. the length diacritics in the current string. :rtype: IPAString
[ "Return", "a", "new", "IPAString", "containing", "only", ":", "1", ".", "the", "consonants", "2", ".", "the", "vowels", "and", "3", ".", "the", "stress", "diacritics", "and", "4", ".", "the", "length", "diacritics" ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/ipastring.py#L293-L306
pettarin/ipapy
ipapy/ipastring.py
IPAString.cns_vwl_pstr_long
def cns_vwl_pstr_long(self): """ Return a new IPAString, containing only: 1. the consonants, 2. the vowels, and 3. the primary stress diacritics, and 4. the long suprasegmental in the current string. :rtype: IPAString """ return IPAString(ipa_chars=[c for c in self.ipa_chars if (c.is_letter) or (c.is_suprasegmental and (c.is_primary_stress or c.is_long))])
python
def cns_vwl_pstr_long(self): """ Return a new IPAString, containing only: 1. the consonants, 2. the vowels, and 3. the primary stress diacritics, and 4. the long suprasegmental in the current string. :rtype: IPAString """ return IPAString(ipa_chars=[c for c in self.ipa_chars if (c.is_letter) or (c.is_suprasegmental and (c.is_primary_stress or c.is_long))])
[ "def", "cns_vwl_pstr_long", "(", "self", ")", ":", "return", "IPAString", "(", "ipa_chars", "=", "[", "c", "for", "c", "in", "self", ".", "ipa_chars", "if", "(", "c", ".", "is_letter", ")", "or", "(", "c", ".", "is_suprasegmental", "and", "(", "c", ".", "is_primary_stress", "or", "c", ".", "is_long", ")", ")", "]", ")" ]
Return a new IPAString, containing only: 1. the consonants, 2. the vowels, and 3. the primary stress diacritics, and 4. the long suprasegmental in the current string. :rtype: IPAString
[ "Return", "a", "new", "IPAString", "containing", "only", ":", "1", ".", "the", "consonants", "2", ".", "the", "vowels", "and", "3", ".", "the", "primary", "stress", "diacritics", "and", "4", ".", "the", "long", "suprasegmental" ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/ipastring.py#L309-L322
pettarin/ipapy
ipapy/ipastring.py
IPAString.cns_vwl_str_len_wb
def cns_vwl_str_len_wb(self): """ Return a new IPAString, containing only: 1. the consonants, 2. the vowels, and 3. the stress diacritics, 4. the length diacritics, and 5. the word breaks in the current string. :rtype: IPAString """ return IPAString(ipa_chars=[c for c in self.ipa_chars if (c.is_letter) or (c.is_suprasegmental and (c.is_stress or c.is_length or c.is_word_break))])
python
def cns_vwl_str_len_wb(self): """ Return a new IPAString, containing only: 1. the consonants, 2. the vowels, and 3. the stress diacritics, 4. the length diacritics, and 5. the word breaks in the current string. :rtype: IPAString """ return IPAString(ipa_chars=[c for c in self.ipa_chars if (c.is_letter) or (c.is_suprasegmental and (c.is_stress or c.is_length or c.is_word_break))])
[ "def", "cns_vwl_str_len_wb", "(", "self", ")", ":", "return", "IPAString", "(", "ipa_chars", "=", "[", "c", "for", "c", "in", "self", ".", "ipa_chars", "if", "(", "c", ".", "is_letter", ")", "or", "(", "c", ".", "is_suprasegmental", "and", "(", "c", ".", "is_stress", "or", "c", ".", "is_length", "or", "c", ".", "is_word_break", ")", ")", "]", ")" ]
Return a new IPAString, containing only: 1. the consonants, 2. the vowels, and 3. the stress diacritics, 4. the length diacritics, and 5. the word breaks in the current string. :rtype: IPAString
[ "Return", "a", "new", "IPAString", "containing", "only", ":", "1", ".", "the", "consonants", "2", ".", "the", "vowels", "and", "3", ".", "the", "stress", "diacritics", "4", ".", "the", "length", "diacritics", "and", "5", ".", "the", "word", "breaks" ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/ipastring.py#L325-L339
pettarin/ipapy
ipapy/ipastring.py
IPAString.cns_vwl_str_len_wb_sb
def cns_vwl_str_len_wb_sb(self): """ Return a new IPAString, containing only: 1. the consonants, 2. the vowels, and 3. the stress diacritics, 4. the length diacritics, 5. the word breaks, and 6. the syllable breaks in the current string. :rtype: IPAString """ return IPAString(ipa_chars=[c for c in self.ipa_chars if (c.is_letter) or (c.is_suprasegmental and (c.is_stress or c.is_length or c.is_word_break or c.is_syllable_break))])
python
def cns_vwl_str_len_wb_sb(self): """ Return a new IPAString, containing only: 1. the consonants, 2. the vowels, and 3. the stress diacritics, 4. the length diacritics, 5. the word breaks, and 6. the syllable breaks in the current string. :rtype: IPAString """ return IPAString(ipa_chars=[c for c in self.ipa_chars if (c.is_letter) or (c.is_suprasegmental and (c.is_stress or c.is_length or c.is_word_break or c.is_syllable_break))])
[ "def", "cns_vwl_str_len_wb_sb", "(", "self", ")", ":", "return", "IPAString", "(", "ipa_chars", "=", "[", "c", "for", "c", "in", "self", ".", "ipa_chars", "if", "(", "c", ".", "is_letter", ")", "or", "(", "c", ".", "is_suprasegmental", "and", "(", "c", ".", "is_stress", "or", "c", ".", "is_length", "or", "c", ".", "is_word_break", "or", "c", ".", "is_syllable_break", ")", ")", "]", ")" ]
Return a new IPAString, containing only: 1. the consonants, 2. the vowels, and 3. the stress diacritics, 4. the length diacritics, 5. the word breaks, and 6. the syllable breaks in the current string. :rtype: IPAString
[ "Return", "a", "new", "IPAString", "containing", "only", ":", "1", ".", "the", "consonants", "2", ".", "the", "vowels", "and", "3", ".", "the", "stress", "diacritics", "4", ".", "the", "length", "diacritics", "5", ".", "the", "word", "breaks", "and", "6", ".", "the", "syllable", "breaks" ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/ipastring.py#L342-L357
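The properties from consonants down to cns_vwl_str_len_wb_sb form a ladder, each rung admitting one more class of suprasegmentals; a sketch of three rungs:

```python
from ipapy.ipastring import IPAString

s = IPAString(unicode_string=u"\u02c8ma\u02d0mo")  # 'ˈmaːmo'
print(s.letters)          # consonants and vowels only
print(s.cns_vwl_pstr)     # plus the primary stress mark
print(s.cns_vwl_str_len)  # plus stress and length marks
```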
pettarin/ipapy
ipapy/data/__init__.py
convert_unicode_field
def convert_unicode_field(string): """ Convert a Unicode field into the corresponding list of Unicode strings. The (input) Unicode field is a Unicode string containing one or more Unicode codepoints (``xxxx`` or ``U+xxxx`` or ``xxxx_yyyy``), separated by a space. :param str string: the (input) Unicode field :rtype: list of Unicode strings """ values = [] for codepoint in [s for s in string.split(DATA_FILE_CODEPOINT_SEPARATOR) if (s != DATA_FILE_VALUE_NOT_AVAILABLE) and (len(s) > 0)]: values.append(u"".join([hex_to_unichr(c) for c in codepoint.split(DATA_FILE_CODEPOINT_JOINER)])) return values
python
def convert_unicode_field(string): """ Convert a Unicode field into the corresponding list of Unicode strings. The (input) Unicode field is a Unicode string containing one or more Unicode codepoints (``xxxx`` or ``U+xxxx`` or ``xxxx_yyyy``), separated by a space. :param str string: the (input) Unicode field :rtype: list of Unicode strings """ values = [] for codepoint in [s for s in string.split(DATA_FILE_CODEPOINT_SEPARATOR) if (s != DATA_FILE_VALUE_NOT_AVAILABLE) and (len(s) > 0)]: values.append(u"".join([hex_to_unichr(c) for c in codepoint.split(DATA_FILE_CODEPOINT_JOINER)])) return values
[ "def", "convert_unicode_field", "(", "string", ")", ":", "values", "=", "[", "]", "for", "codepoint", "in", "[", "s", "for", "s", "in", "string", ".", "split", "(", "DATA_FILE_CODEPOINT_SEPARATOR", ")", "if", "(", "s", "!=", "DATA_FILE_VALUE_NOT_AVAILABLE", ")", "and", "(", "len", "(", "s", ")", ">", "0", ")", "]", ":", "values", ".", "append", "(", "u\"\"", ".", "join", "(", "[", "hex_to_unichr", "(", "c", ")", "for", "c", "in", "codepoint", ".", "split", "(", "DATA_FILE_CODEPOINT_JOINER", ")", "]", ")", ")", "return", "values" ]
Convert a Unicode field into the corresponding list of Unicode strings. The (input) Unicode field is a Unicode string containing one or more Unicode codepoints (``xxxx`` or ``U+xxxx`` or ``xxxx_yyyy``), separated by a space. :param str string: the (input) Unicode field :rtype: list of Unicode strings
[ "Convert", "a", "Unicode", "field", "into", "the", "corresponding", "list", "of", "Unicode", "strings", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/data/__init__.py#L55-L69
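A sketch of the field grammar the docstring describes (a space separates codepoints, '_' joins codepoints into one compound string); the literal input is a made-up example, and the import path assumes the helper is exported by ipapy/data/__init__.py as the record indicates:

```python
from ipapy.data import convert_unicode_field

# 'U+xxxx' codepoints are also accepted, per the docstring above.
print(convert_unicode_field(u"0061 0062_0063"))
# expected: [u'a', u'bc']
```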
pettarin/ipapy
ipapy/data/__init__.py
convert_ascii_field
def convert_ascii_field(string): """ Convert an ASCII field into the corresponding list of Unicode strings. The (input) ASCII field is a Unicode string containing one or more ASCII codepoints (``00xx`` or ``U+00xx`` or an ASCII string not starting with ``00`` or ``U+``), separated by a space. :param str string: the (input) ASCII field :rtype: list of Unicode strings """ values = [] for codepoint in [s for s in string.split(DATA_FILE_CODEPOINT_SEPARATOR) if (s != DATA_FILE_VALUE_NOT_AVAILABLE) and (len(s) > 0)]: #if DATA_FILE_CODEPOINT_JOINER in codepoint: # values.append(u"".join([hex_to_unichr(c) for c in codepoint.split(DATA_FILE_CODEPOINT_JOINER)])) if (codepoint.startswith(DATA_FILE_ASCII_NUMERICAL_CODEPOINT_START)) or (codepoint.startswith(DATA_FILE_ASCII_UNICODE_CODEPOINT_START)): values.append(hex_to_unichr(codepoint)) else: values.append(codepoint) return values
python
def convert_ascii_field(string): """ Convert an ASCII field into the corresponding list of Unicode strings. The (input) ASCII field is a Unicode string containing one or more ASCII codepoints (``00xx`` or ``U+00xx`` or an ASCII string not starting with ``00`` or ``U+``), separated by a space. :param str string: the (input) ASCII field :rtype: list of Unicode strings """ values = [] for codepoint in [s for s in string.split(DATA_FILE_CODEPOINT_SEPARATOR) if (s != DATA_FILE_VALUE_NOT_AVAILABLE) and (len(s) > 0)]: #if DATA_FILE_CODEPOINT_JOINER in codepoint: # values.append(u"".join([hex_to_unichr(c) for c in codepoint.split(DATA_FILE_CODEPOINT_JOINER)])) if (codepoint.startswith(DATA_FILE_ASCII_NUMERICAL_CODEPOINT_START)) or (codepoint.startswith(DATA_FILE_ASCII_UNICODE_CODEPOINT_START)): values.append(hex_to_unichr(codepoint)) else: values.append(codepoint) return values
[ "def", "convert_ascii_field", "(", "string", ")", ":", "values", "=", "[", "]", "for", "codepoint", "in", "[", "s", "for", "s", "in", "string", ".", "split", "(", "DATA_FILE_CODEPOINT_SEPARATOR", ")", "if", "(", "s", "!=", "DATA_FILE_VALUE_NOT_AVAILABLE", ")", "and", "(", "len", "(", "s", ")", ">", "0", ")", "]", ":", "#if DATA_FILE_CODEPOINT_JOINER in codepoint:", "# values.append(u\"\".join([hex_to_unichr(c) for c in codepoint.split(DATA_FILE_CODEPOINT_JOINER)]))", "if", "(", "codepoint", ".", "startswith", "(", "DATA_FILE_ASCII_NUMERICAL_CODEPOINT_START", ")", ")", "or", "(", "codepoint", ".", "startswith", "(", "DATA_FILE_ASCII_UNICODE_CODEPOINT_START", ")", ")", ":", "values", ".", "append", "(", "hex_to_unichr", "(", "codepoint", ")", ")", "else", ":", "values", ".", "append", "(", "codepoint", ")", "return", "values" ]
Convert an ASCII field into the corresponding list of Unicode strings. The (input) ASCII field is a Unicode string containing one or more ASCII codepoints (``00xx`` or ``U+00xx`` or an ASCII string not starting with ``00`` or ``U+``), separated by a space. :param str string: the (input) ASCII field :rtype: list of Unicode strings
[ "Convert", "an", "ASCII", "field", "into", "the", "corresponding", "list", "of", "Unicode", "strings", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/data/__init__.py#L71-L91
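Unlike the Unicode variant, entries not starting with '00' or 'U+' are kept literally, which is how ASCII mapper sequences survive; a sketch with a made-up field:

```python
from ipapy.data import convert_ascii_field

print(convert_ascii_field(u"0061 tS"))
# expected: [u'a', u'tS'] ('0061' decoded as a codepoint, 'tS' kept as-is)
```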
pettarin/ipapy
ipapy/data/__init__.py
convert_raw_tuple
def convert_raw_tuple(value_tuple, format_string): """ Convert a tuple of raw values, according to the given line format. :param tuple value_tuple: the tuple of raw values :param str format_string: the format of the tuple :rtype: list of tuples """ values = [] for v, c in zip(value_tuple, format_string): if v is None: # append None values.append(v) elif c == u"s": # string values.append(v) elif c == u"S": # string, split using space as delimiter values.append([s for s in v.split(u" ") if len(s) > 0]) elif c == u"i": # int values.append(int(v)) elif c == u"U": # Unicode values.append(convert_unicode_field(v)) elif c == u"A": # ASCII values.append(convert_ascii_field(v)) #elif c == u"x": # # ignore # pass return tuple(values)
python
def convert_raw_tuple(value_tuple, format_string): """ Convert a tuple of raw values, according to the given line format. :param tuple value_tuple: the tuple of raw values :param str format_string: the format of the tuple :rtype: list of tuples """ values = [] for v, c in zip(value_tuple, format_string): if v is None: # append None values.append(v) elif c == u"s": # string values.append(v) elif c == u"S": # string, split using space as delimiter values.append([s for s in v.split(u" ") if len(s) > 0]) elif c == u"i": # int values.append(int(v)) elif c == u"U": # Unicode values.append(convert_unicode_field(v)) elif c == u"A": # ASCII values.append(convert_ascii_field(v)) #elif c == u"x": # # ignore # pass return tuple(values)
[ "def", "convert_raw_tuple", "(", "value_tuple", ",", "format_string", ")", ":", "values", "=", "[", "]", "for", "v", ",", "c", "in", "zip", "(", "value_tuple", ",", "format_string", ")", ":", "if", "v", "is", "None", ":", "# append None", "values", ".", "append", "(", "v", ")", "elif", "c", "==", "u\"s\"", ":", "# string", "values", ".", "append", "(", "v", ")", "elif", "c", "==", "u\"S\"", ":", "# string, split using space as delimiter", "values", ".", "append", "(", "[", "s", "for", "s", "in", "v", ".", "split", "(", "u\" \"", ")", "if", "len", "(", "s", ")", ">", "0", "]", ")", "elif", "c", "==", "u\"i\"", ":", "# int", "values", ".", "append", "(", "int", "(", "v", ")", ")", "elif", "c", "==", "u\"U\"", ":", "# Unicode", "values", ".", "append", "(", "convert_unicode_field", "(", "v", ")", ")", "elif", "c", "==", "u\"A\"", ":", "# ASCII", "values", ".", "append", "(", "convert_ascii_field", "(", "v", ")", ")", "#elif c == u\"x\":", "# # ignore", "# pass", "return", "tuple", "(", "values", ")" ]
Convert a tuple of raw values, according to the given line format. :param tuple value_tuple: the tuple of raw values :param str format_string: the format of the tuple :rtype: list of tuples
[ "Convert", "a", "tuple", "of", "raw", "values", "according", "to", "the", "given", "line", "format", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/data/__init__.py#L93-L124
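A sketch using the 'sU' format that load_ipa_data applies below: the first field stays a string, the second is parsed as a Unicode-codepoint field:

```python
from ipapy.data import convert_raw_tuple

desc, keys = convert_raw_tuple((u"open back unrounded vowel", u"0251"), u"sU")
print(desc)  # 'open back unrounded vowel'
print(keys)  # [u'ɑ']
```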
pettarin/ipapy
ipapy/data/__init__.py
load_data_file
def load_data_file( file_path, file_path_is_relative=False, comment_string=DATA_FILE_COMMENT, field_separator=DATA_FILE_FIELD_SEPARATOR, line_format=None ): """ Load a data file, with one record per line and fields separated by ``field_separator``, returning a list of tuples. It ignores lines starting with ``comment_string`` or empty lines. If ``line_format`` is not ``None``, check that each line (tuple) has the prescribed number of values. :param str file_path: path of the data file to load :param bool file_path_is_relative: if ``True``, ``file_path`` is relative to this source code file :param str comment_string: ignore lines starting with this string :param str field_separator: fields are separated by this string :param str line_format: if not ``None``, parses each line according to the given format (``s`` = string, ``S`` = split string using spaces, ``i`` = int, ``x`` = ignore, ``U`` = Unicode, ``A`` = ASCII) :rtype: list of tuples """ raw_tuples = [] if file_path_is_relative: file_path = os.path.join(os.path.dirname(__file__), file_path) with io.open(file_path, "r", encoding="utf-8") as f: for line in f: line = line.strip() if (len(line) > 0) and (not line.startswith(comment_string)): raw_list = line.split(field_separator) if (line_format is not None) and (len(raw_list) != len(line_format)): raise ValueError("Data file '%s' contains a bad line: '%s'" % (file_path, line)) raw_tuples.append(tuple(raw_list)) if (line_format is None) or (len(line_format) < 1): return raw_tuples return [convert_raw_tuple(t, line_format) for t in raw_tuples]
python
def load_data_file( file_path, file_path_is_relative=False, comment_string=DATA_FILE_COMMENT, field_separator=DATA_FILE_FIELD_SEPARATOR, line_format=None ): """ Load a data file, with one record per line and fields separated by ``field_separator``, returning a list of tuples. It ignores lines starting with ``comment_string`` or empty lines. If ``line_format`` is not ``None``, check that each line (tuple) has the prescribed number of values. :param str file_path: path of the data file to load :param bool file_path_is_relative: if ``True``, ``file_path`` is relative to this source code file :param str comment_string: ignore lines starting with this string :param str field_separator: fields are separated by this string :param str line_format: if not ``None``, parses each line according to the given format (``s`` = string, ``S`` = split string using spaces, ``i`` = int, ``x`` = ignore, ``U`` = Unicode, ``A`` = ASCII) :rtype: list of tuples """ raw_tuples = [] if file_path_is_relative: file_path = os.path.join(os.path.dirname(__file__), file_path) with io.open(file_path, "r", encoding="utf-8") as f: for line in f: line = line.strip() if (len(line) > 0) and (not line.startswith(comment_string)): raw_list = line.split(field_separator) if (line_format is not None) and (len(raw_list) != len(line_format)): raise ValueError("Data file '%s' contains a bad line: '%s'" % (file_path, line)) raw_tuples.append(tuple(raw_list)) if (line_format is None) or (len(line_format) < 1): return raw_tuples return [convert_raw_tuple(t, line_format) for t in raw_tuples]
[ "def", "load_data_file", "(", "file_path", ",", "file_path_is_relative", "=", "False", ",", "comment_string", "=", "DATA_FILE_COMMENT", ",", "field_separator", "=", "DATA_FILE_FIELD_SEPARATOR", ",", "line_format", "=", "None", ")", ":", "raw_tuples", "=", "[", "]", "if", "file_path_is_relative", ":", "file_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "file_path", ")", "with", "io", ".", "open", "(", "file_path", ",", "\"r\"", ",", "encoding", "=", "\"utf-8\"", ")", "as", "f", ":", "for", "line", "in", "f", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "(", "len", "(", "line", ")", ">", "0", ")", "and", "(", "not", "line", ".", "startswith", "(", "comment_string", ")", ")", ":", "raw_list", "=", "line", ".", "split", "(", "field_separator", ")", "if", "len", "(", "raw_list", ")", "!=", "len", "(", "line_format", ")", ":", "raise", "ValueError", "(", "\"Data file '%s' contains a bad line: '%s'\"", "%", "(", "file_path", ",", "line", ")", ")", "raw_tuples", ".", "append", "(", "tuple", "(", "raw_list", ")", ")", "if", "(", "line_format", "is", "None", ")", "or", "(", "len", "(", "line_format", ")", "<", "1", ")", ":", "return", "raw_tuples", "return", "[", "convert_raw_tuple", "(", "t", ",", "line_format", ")", "for", "t", "in", "raw_tuples", "]" ]
Load a data file, with one record per line and fields separated by ``field_separator``, returning a list of tuples. It ignores lines starting with ``comment_string`` or empty lines. If ``line_format`` is not ``None``, check that each line (tuple) has the prescribed number of values. :param str file_path: path of the data file to load :param bool file_path_is_relative: if ``True``, ``file_path`` is relative to this source code file :param str comment_string: ignore lines starting with this string :param str field_separator: fields are separated by this string :param str line_format: if not ``None``, parses each line according to the given format (``s`` = string, ``S`` = split string using spaces, ``i`` = int, ``x`` = ignore, ``U`` = Unicode, ``A`` = ASCII) :rtype: list of tuples
[ "Load", "a", "data", "file", "with", "one", "record", "per", "line", "and", "fields", "separated", "by", "field_separator", "returning", "a", "list", "of", "tuples", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/data/__init__.py#L126-L166
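A sketch of loading the bundled database exactly as load_ipa_data does below; the file name and format string come from the next record:

```python
from ipapy.data import load_data_file

rows = load_data_file(
    file_path=u"ipa.dat",
    file_path_is_relative=True,  # resolved against ipapy/data/__init__.py
    line_format=u"sU",           # description string + Unicode codepoints
)
print(len(rows))  # number of IPA sign definitions
print(rows[0])    # (description, list of Unicode strings)
```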
pettarin/ipapy
ipapy/data/__init__.py
load_ipa_data
def load_ipa_data(): """ Load the IPA data from the built-in IPA database, creating the following globals: 1. ``IPA_CHARS``: list of all IPAChar objects 2. ``UNICODE_TO_IPA``: dict mapping a Unicode string (often, a single char) to an IPAChar 3. ``UNICODE_TO_IPA_MAX_KEY_LENGTH``: length of a longest key in ``UNICODE_TO_IPA`` 4. ``IPA_TO_UNICODE``: map an IPAChar canonical representation to the corresponding Unicode string (or char) """ ipa_signs = [] unicode_to_ipa = {} ipa_to_unicode = {} max_key_length = 0 for line in load_data_file( file_path=u"ipa.dat", file_path_is_relative=True, line_format=u"sU" ): # unpack data i_desc, i_unicode_keys = line name = re.sub(r" [ ]*", " ", i_desc) # create a suitable IPACharacter obj if u"consonant" in i_desc: obj = IPAConsonant(name=name, descriptors=i_desc) elif u"vowel" in i_desc: obj = IPAVowel(name=name, descriptors=i_desc) elif u"diacritic" in i_desc: obj = IPADiacritic(name=name, descriptors=i_desc) elif u"suprasegmental" in i_desc: obj = IPASuprasegmental(name=name, descriptors=i_desc) elif u"tone" in i_desc: obj = IPATone(name=name, descriptors=i_desc) else: raise ValueError("The IPA data file contains a bad line, defining an unknown type: '%s'" % (line)) ipa_signs.append(obj) # map Unicode codepoint to object, if the former is available if len(i_unicode_keys) > 0: # canonical Unicode string first_key = i_unicode_keys[0] ipa_to_unicode[obj.canonical_representation] = first_key obj.unicode_repr = first_key max_key_length = max(max_key_length, len(first_key)) # add all Unicode strings for key in i_unicode_keys: if key in unicode_to_ipa: raise ValueError("The IPA data file contains a bad line, redefining codepoint '%s': '%s'" % (key, line)) unicode_to_ipa[key] = obj return ipa_signs, unicode_to_ipa, max_key_length, ipa_to_unicode
python
def load_ipa_data(): """ Load the IPA data from the built-in IPA database, creating the following globals: 1. ``IPA_CHARS``: list of all IPAChar objects 2. ``UNICODE_TO_IPA``: dict mapping a Unicode string (often, a single char) to an IPAChar 3. ``UNICODE_TO_IPA_MAX_KEY_LENGTH``: length of a longest key in ``UNICODE_TO_IPA`` 4. ``IPA_TO_UNICODE``: map an IPAChar canonical representation to the corresponding Unicode string (or char) """ ipa_signs = [] unicode_to_ipa = {} ipa_to_unicode = {} max_key_length = 0 for line in load_data_file( file_path=u"ipa.dat", file_path_is_relative=True, line_format=u"sU" ): # unpack data i_desc, i_unicode_keys = line name = re.sub(r" [ ]*", " ", i_desc) # create a suitable IPACharacter obj if u"consonant" in i_desc: obj = IPAConsonant(name=name, descriptors=i_desc) elif u"vowel" in i_desc: obj = IPAVowel(name=name, descriptors=i_desc) elif u"diacritic" in i_desc: obj = IPADiacritic(name=name, descriptors=i_desc) elif u"suprasegmental" in i_desc: obj = IPASuprasegmental(name=name, descriptors=i_desc) elif u"tone" in i_desc: obj = IPATone(name=name, descriptors=i_desc) else: raise ValueError("The IPA data file contains a bad line, defining an unknown type: '%s'" % (line)) ipa_signs.append(obj) # map Unicode codepoint to object, if the former is available if len(i_unicode_keys) > 0: # canonical Unicode string first_key = i_unicode_keys[0] ipa_to_unicode[obj.canonical_representation] = first_key obj.unicode_repr = first_key max_key_length = max(max_key_length, len(first_key)) # add all Unicode strings for key in i_unicode_keys: if key in unicode_to_ipa: raise ValueError("The IPA data file contains a bad line, redefining codepoint '%s': '%s'" % (key, line)) unicode_to_ipa[key] = obj return ipa_signs, unicode_to_ipa, max_key_length, ipa_to_unicode
[ "def", "load_ipa_data", "(", ")", ":", "ipa_signs", "=", "[", "]", "unicode_to_ipa", "=", "{", "}", "ipa_to_unicode", "=", "{", "}", "max_key_length", "=", "0", "for", "line", "in", "load_data_file", "(", "file_path", "=", "u\"ipa.dat\"", ",", "file_path_is_relative", "=", "True", ",", "line_format", "=", "u\"sU\"", ")", ":", "# unpack data", "i_desc", ",", "i_unicode_keys", "=", "line", "name", "=", "re", ".", "sub", "(", "r\" [ ]*\"", ",", "\" \"", ",", "i_desc", ")", "# create a suitable IPACharacter obj", "if", "u\"consonant\"", "in", "i_desc", ":", "obj", "=", "IPAConsonant", "(", "name", "=", "name", ",", "descriptors", "=", "i_desc", ")", "elif", "u\"vowel\"", "in", "i_desc", ":", "obj", "=", "IPAVowel", "(", "name", "=", "name", ",", "descriptors", "=", "i_desc", ")", "elif", "u\"diacritic\"", "in", "i_desc", ":", "obj", "=", "IPADiacritic", "(", "name", "=", "name", ",", "descriptors", "=", "i_desc", ")", "elif", "u\"suprasegmental\"", "in", "i_desc", ":", "obj", "=", "IPASuprasegmental", "(", "name", "=", "name", ",", "descriptors", "=", "i_desc", ")", "elif", "u\"tone\"", "in", "i_desc", ":", "obj", "=", "IPATone", "(", "name", "=", "name", ",", "descriptors", "=", "i_desc", ")", "else", ":", "raise", "ValueError", "(", "\"The IPA data file contains a bad line, defining an unknown type: '%s'\"", "%", "(", "line", ")", ")", "ipa_signs", ".", "append", "(", "obj", ")", "# map Unicode codepoint to object, if the former is available", "if", "len", "(", "i_unicode_keys", ")", ">", "0", ":", "# canonical Unicode string", "first_key", "=", "i_unicode_keys", "[", "0", "]", "ipa_to_unicode", "[", "obj", ".", "canonical_representation", "]", "=", "first_key", "obj", ".", "unicode_repr", "=", "first_key", "max_key_length", "=", "max", "(", "max_key_length", ",", "len", "(", "first_key", ")", ")", "# add all Unicode strings ", "for", "key", "in", "i_unicode_keys", ":", "if", "key", "in", "unicode_to_ipa", ":", "raise", "ValueError", "(", "\"The IPA data file contains a bad line, redefining codepoint '%s': '%s'\"", "%", "(", "key", ",", "line", ")", ")", "unicode_to_ipa", "[", "key", "]", "=", "obj", "return", "ipa_signs", ",", "unicode_to_ipa", ",", "max_key_length", ",", "ipa_to_unicode" ]
Load the IPA data from the built-in IPA database, creating the following globals: 1. ``IPA_CHARS``: list of all IPAChar objects 2. ``UNICODE_TO_IPA``: dict mapping a Unicode string (often, a single char) to an IPAChar 3. ``UNICODE_TO_IPA_MAX_KEY_LENGTH``: length of a longest key in ``UNICODE_TO_IPA`` 4. ``IPA_TO_UNICODE``: map an IPAChar canonical representation to the corresponding Unicode string (or char)
[ "Load", "the", "IPA", "data", "from", "the", "built", "-", "in", "IPA", "database", "creating", "the", "following", "globals", ":" ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/data/__init__.py#L168-L217
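Assuming ipapy/data/__init__.py binds the return values of load_ipa_data to the module-level names its docstring lists, the lookup tables can be used directly:

```python
from ipapy.data import IPA_TO_UNICODE, UNICODE_TO_IPA  # names assumed bound at import

c = UNICODE_TO_IPA[u"\u0251"]                      # IPAChar for 'ɑ'
print(c.name)                                       # its full descriptive name
print(IPA_TO_UNICODE[c.canonical_representation])   # back to u'ɑ'
```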
pettarin/ipapy
ipapy/__init__.py
split_using_dictionary
def split_using_dictionary(string, dictionary, max_key_length, single_char_parsing=False): """ Return a list of (non-empty) substrings of the given string, where each substring is either: 1. the longest string starting at the current index that is a key in the dictionary, or 2. a single character that is not a key in the dictionary. If ``single_char_parsing`` is ``True``, parse the string one Unicode character at a time, that is, do not perform the greedy parsing. :param iterable string: the iterable object ("string") to split into atoms :param dict dictionary: the dictionary mapping atoms ("characters") to something else :param int max_key_length: the length of a longest key, in number of characters :param bool single_char_parsing: if ``True``, parse one Unicode character at a time """ def substring(string, i, j): if isinstance(string[i], tuple): # transform list of tuples with one element in a tuple with all elements return tuple([string[k][0] for k in range(i, j)]) # just return substring return string[i:j] if string is None: return None if (single_char_parsing) or (max_key_length < 2): return [c for c in string] acc = [] l = len(string) i = 0 while i < l: found = False for j in range(min(i + max_key_length, l), i, -1): sub = substring(string, i, j) if sub in dictionary: found = True acc.append(sub) i = j break if not found: acc.append(string[i]) i += 1 return acc
python
def split_using_dictionary(string, dictionary, max_key_length, single_char_parsing=False): """ Return a list of (non-empty) substrings of the given string, where each substring is either: 1. the longest string starting at the current index that is a key in the dictionary, or 2. a single character that is not a key in the dictionary. If ``single_char_parsing`` is ``True``, parse the string one Unicode character at a time, that is, do not perform the greedy parsing. :param iterable string: the iterable object ("string") to split into atoms :param dict dictionary: the dictionary mapping atoms ("characters") to something else :param int max_key_length: the length of a longest key, in number of characters :param bool single_char_parsing: if ``True``, parse one Unicode character at a time """ def substring(string, i, j): if isinstance(string[i], tuple): # transform list of tuples with one element in a tuple with all elements return tuple([string[k][0] for k in range(i, j)]) # just return substring return string[i:j] if string is None: return None if (single_char_parsing) or (max_key_length < 2): return [c for c in string] acc = [] l = len(string) i = 0 while i < l: found = False for j in range(min(i + max_key_length, l), i, -1): sub = substring(string, i, j) if sub in dictionary: found = True acc.append(sub) i = j break if not found: acc.append(string[i]) i += 1 return acc
[ "def", "split_using_dictionary", "(", "string", ",", "dictionary", ",", "max_key_length", ",", "single_char_parsing", "=", "False", ")", ":", "def", "substring", "(", "string", ",", "i", ",", "j", ")", ":", "if", "isinstance", "(", "string", "[", "i", "]", ",", "tuple", ")", ":", "# transform list of tuples with one element in a tuple with all elements", "return", "tuple", "(", "[", "string", "[", "k", "]", "[", "0", "]", "for", "k", "in", "range", "(", "i", ",", "j", ")", "]", ")", "# just return substring", "return", "string", "[", "i", ":", "j", "]", "if", "string", "is", "None", ":", "return", "None", "if", "(", "single_char_parsing", ")", "or", "(", "max_key_length", "<", "2", ")", ":", "return", "[", "c", "for", "c", "in", "string", "]", "acc", "=", "[", "]", "l", "=", "len", "(", "string", ")", "i", "=", "0", "while", "i", "<", "l", ":", "found", "=", "False", "for", "j", "in", "range", "(", "min", "(", "i", "+", "max_key_length", ",", "l", ")", ",", "i", ",", "-", "1", ")", ":", "sub", "=", "substring", "(", "string", ",", "i", ",", "j", ")", "if", "sub", "in", "dictionary", ":", "found", "=", "True", "acc", ".", "append", "(", "sub", ")", "i", "=", "j", "break", "if", "not", "found", ":", "acc", ".", "append", "(", "string", "[", "i", "]", ")", "i", "+=", "1", "return", "acc" ]
Return a list of (non-empty) substrings of the given string, where each substring is either: 1. the longest string starting at the current index that is a key in the dictionary, or 2. a single character that is not a key in the dictionary. If ``single_char_parsing`` is ``True``, parse the string one Unicode character at a time, that is, do not perform the greedy parsing. :param iterable string: the iterable object ("string") to split into atoms :param dict dictionary: the dictionary mapping atoms ("characters") to something else :param int max_key_length: the length of a longest key, in number of characters :param bool single_char_parsing: if ``True``, parse one Unicode character at a time
[ "Return", "a", "list", "of", "(", "non", "-", "empty", ")", "substrings", "of", "the", "given", "string", "where", "each", "substring", "is", "either", ":", "1", ".", "the", "longest", "string", "starting", "at", "the", "current", "index", "that", "is", "a", "key", "in", "the", "dictionary", "or", "2", ".", "a", "single", "character", "that", "is", "not", "a", "key", "in", "the", "dictionary", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/__init__.py#L26-L70
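A usage sketch of the greedy longest-match behavior, with a toy dictionary made up for illustration:

from ipapy import split_using_dictionary

d = {u"ab": 1, u"abc": 2, u"c": 3}
print(split_using_dictionary(u"abcd", d, max_key_length=3))
# ['abc', 'd'] -- longest key wins; 'd' is kept as a single unknown character
print(split_using_dictionary(u"abcd", d, max_key_length=3, single_char_parsing=True))
# ['a', 'b', 'c', 'd'] -- greedy matching disabled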
pettarin/ipapy
ipapy/__init__.py
ipa_substrings
def ipa_substrings(unicode_string, single_char_parsing=False): """ Return a list of (non-empty) substrings of the given string, where each substring is either: 1. the longest Unicode string starting at the current index representing a (known) valid IPA character, or 2. a single Unicode character (which is not IPA valid). If ``single_char_parsing`` is ``True``, parse the string one Unicode character at a time, that is, do not perform the greedy parsing. For example, if ``s = u"\u006e\u0361\u006d"``, with ``single_char_parsing=False`` the result will be a list with a single element: ``[u"\u006e\u0361\u006d"]``, while ``single_char_parsing=True`` will yield a list with three elements: ``[u"\u006e", u"\u0361", u"\u006d"]``. Return ``None`` if ``unicode_string`` is ``None``. :param str unicode_string: the Unicode string to be parsed :param bool single_char_parsing: if ``True``, parse one Unicode character at a time :rtype: list of str """ return split_using_dictionary( string=unicode_string, dictionary=UNICODE_TO_IPA, max_key_length=UNICODE_TO_IPA_MAX_KEY_LENGTH, single_char_parsing=single_char_parsing )
python
def ipa_substrings(unicode_string, single_char_parsing=False): """ Return a list of (non-empty) substrings of the given string, where each substring is either: 1. the longest Unicode string starting at the current index representing a (known) valid IPA character, or 2. a single Unicode character (which is not IPA valid). If ``single_char_parsing`` is ``True``, parse the string one Unicode character at a time, that is, do not perform the greedy parsing. For example, if ``s = u"\u006e\u0361\u006d"``, with ``single_char_parsing=False`` the result will be a list with a single element: ``[u"\u006e\u0361\u006d"]``, while ``single_char_parsing=True`` will yield a list with three elements: ``[u"\u006e", u"\u0361", u"\u006d"]``. Return ``None`` if ``unicode_string`` is ``None``. :param str unicode_string: the Unicode string to be parsed :param bool single_char_parsing: if ``True``, parse one Unicode character at a time :rtype: list of str """ return split_using_dictionary( string=unicode_string, dictionary=UNICODE_TO_IPA, max_key_length=UNICODE_TO_IPA_MAX_KEY_LENGTH, single_char_parsing=single_char_parsing )
[ "def", "ipa_substrings", "(", "unicode_string", ",", "single_char_parsing", "=", "False", ")", ":", "return", "split_using_dictionary", "(", "string", "=", "unicode_string", ",", "dictionary", "=", "UNICODE_TO_IPA", ",", "max_key_length", "=", "UNICODE_TO_IPA_MAX_KEY_LENGTH", ",", "single_char_parsing", "=", "single_char_parsing", ")" ]
Return a list of (non-empty) substrings of the given string, where each substring is either: 1. the longest Unicode string starting at the current index representing a (known) valid IPA character, or 2. a single Unicode character (which is not IPA valid). If ``single_char_parsing`` is ``True``, parse the string one Unicode character at a time, that is, do not perform the greedy parsing. For example, if ``s = u"\u006e\u0361\u006d"``, with ``single_char_parsing=False`` the result will be a list with a single element: ``[u"\u006e\u0361\u006d"]``, while ``single_char_parsing=True`` will yield a list with three elements: ``[u"\u006e", u"\u0361", u"\u006d"]``. Return ``None`` if ``unicode_string`` is ``None``. :param str unicode_string: the Unicode string to be parsed :param bool single_char_parsing: if ``True``, parse one Unicode character at a time :rtype: list of str
[ "Return", "a", "list", "of", "(", "non", "-", "empty", ")", "substrings", "of", "the", "given", "string", "where", "each", "substring", "is", "either", ":", "1", ".", "the", "longest", "Unicode", "string", "starting", "at", "the", "current", "index", "representing", "a", "(", "known", ")", "valid", "IPA", "character", "or", "2", ".", "a", "single", "Unicode", "character", "(", "which", "is", "not", "IPA", "valid", ")", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/__init__.py#L72-L102
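The docstring example as a runnable sketch; whether the three-character tie sequence maps to a single IPAChar depends on the built-in database:

from ipapy import ipa_substrings

s = u"\u006e\u0361\u006d"  # n + combining tie bar + m
print(ipa_substrings(s))                            # greedy: [u"\u006e\u0361\u006d"]
print(ipa_substrings(s, single_char_parsing=True))  # [u"\u006e", u"\u0361", u"\u006d"]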
pettarin/ipapy
ipapy/__init__.py
invalid_ipa_characters
def invalid_ipa_characters(unicode_string, indices=False): """ Return the set of Unicode characters in the given Unicode string that are not IPA valid. Return ``None`` if ``unicode_string`` is ``None``. :param str unicode_string: the Unicode string to be parsed :param bool indices: if ``True``, return a list of pairs (index, invalid character), instead of a set of str (characters). :rtype: set of str or list of (int, str) """ if unicode_string is None: return None if indices: return [(i, unicode_string[i]) for i in range(len(unicode_string)) if unicode_string[i] not in UNICODE_TO_IPA] return set([c for c in unicode_string if c not in UNICODE_TO_IPA])
python
def invalid_ipa_characters(unicode_string, indices=False): """ Return the set of Unicode characters in the given Unicode string that are not IPA valid. Return ``None`` if ``unicode_string`` is ``None``. :param str unicode_string: the Unicode string to be parsed :param bool indices: if ``True``, return a list of pairs (index, invalid character), instead of a set of str (characters). :rtype: set of str or list of (int, str) """ if unicode_string is None: return None if indices: return [(i, unicode_string[i]) for i in range(len(unicode_string)) if unicode_string[i] not in UNICODE_TO_IPA] return set([c for c in unicode_string if c not in UNICODE_TO_IPA])
[ "def", "invalid_ipa_characters", "(", "unicode_string", ",", "indices", "=", "False", ")", ":", "if", "unicode_string", "is", "None", ":", "return", "None", "if", "indices", ":", "return", "[", "(", "i", ",", "unicode_string", "[", "i", "]", ")", "for", "i", "in", "range", "(", "len", "(", "unicode_string", ")", ")", "if", "unicode_string", "[", "i", "]", "not", "in", "UNICODE_TO_IPA", "]", "return", "set", "(", "[", "c", "for", "c", "in", "unicode_string", "if", "c", "not", "in", "UNICODE_TO_IPA", "]", ")" ]
Return the set of Unicode characters in the given Unicode string that are not IPA valid. Return ``None`` if ``unicode_string`` is ``None``. :param str unicode_string: the Unicode string to be parsed :param bool indices: if ``True``, return a list of pairs (index, invalid character), instead of a set of str (characters). :rtype: set of str or list of (int, str)
[ "Return", "the", "list", "of", "Unicode", "characters", "in", "the", "given", "Unicode", "string", "that", "are", "not", "IPA", "valid", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/__init__.py#L104-L121
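A sketch of both return shapes, assuming digits are not valid IPA characters in the database:

from ipapy import invalid_ipa_characters

print(invalid_ipa_characters(u"f42o"))                # set of offending characters, e.g. {u'4', u'2'}
print(invalid_ipa_characters(u"f42o", indices=True))  # [(1, u'4'), (2, u'2')]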
pettarin/ipapy
ipapy/__init__.py
remove_invalid_ipa_characters
def remove_invalid_ipa_characters(unicode_string, return_invalid=False, single_char_parsing=False): """ Remove all Unicode characters that are not IPA valid from the given string, and return a list of substrings of the given string, each mapping to a (known) valid IPA character. Return ``None`` if ``unicode_string`` is ``None``. :param str unicode_string: the Unicode string to be parsed :param bool return_invalid: if ``True``, return a pair ``(valid, invalid)``, where ``invalid`` is a list of Unicode characters that are not IPA valid. :param bool single_char_parsing: if ``True``, parse one Unicode character at a time :rtype: list of str """ if unicode_string is None: return None substrings = ipa_substrings(unicode_string, single_char_parsing=single_char_parsing) valid = [s for s in substrings if s in UNICODE_TO_IPA] if return_invalid: return (valid, [s for s in substrings if s not in UNICODE_TO_IPA]) return valid
python
def remove_invalid_ipa_characters(unicode_string, return_invalid=False, single_char_parsing=False): """ Remove all Unicode characters that are not IPA valid from the given string, and return a list of substrings of the given string, each mapping to a (known) valid IPA character. Return ``None`` if ``unicode_string`` is ``None``. :param str unicode_string: the Unicode string to be parsed :param bool return_invalid: if ``True``, return a pair ``(valid, invalid)``, where ``invalid`` is a list of Unicode characters that are not IPA valid. :param bool single_char_parsing: if ``True``, parse one Unicode character at a time :rtype: list of str """ if unicode_string is None: return None substrings = ipa_substrings(unicode_string, single_char_parsing=single_char_parsing) valid = [s for s in substrings if s in UNICODE_TO_IPA] if return_invalid: return (valid, [s for s in substrings if s not in UNICODE_TO_IPA]) return valid
[ "def", "remove_invalid_ipa_characters", "(", "unicode_string", ",", "return_invalid", "=", "False", ",", "single_char_parsing", "=", "False", ")", ":", "if", "unicode_string", "is", "None", ":", "return", "None", "substrings", "=", "ipa_substrings", "(", "unicode_string", ",", "single_char_parsing", "=", "single_char_parsing", ")", "valid", "=", "[", "s", "for", "s", "in", "substrings", "if", "s", "in", "UNICODE_TO_IPA", "]", "if", "return_invalid", ":", "return", "(", "valid", ",", "[", "s", "for", "s", "in", "substrings", "if", "s", "not", "in", "UNICODE_TO_IPA", "]", ")", "return", "valid" ]
Remove all Unicode characters that are not IPA valid from the given string, and return a list of substrings of the given string, each mapping to a (known) valid IPA character. Return ``None`` if ``unicode_string`` is ``None``. :param str unicode_string: the Unicode string to be parsed :param bool return_invalid: if ``True``, return a pair ``(valid, invalid)``, where ``invalid`` is a list of Unicode characters that are not IPA valid. :param bool single_char_parsing: if ``True``, parse one Unicode character at a time :rtype: list of str
[ "Remove", "all", "Unicode", "characters", "that", "are", "not", "IPA", "valid", "from", "the", "given", "string", "and", "return", "a", "list", "of", "substrings", "of", "the", "given", "string", "each", "mapping", "to", "a", "(", "known", ")", "valid", "IPA", "character", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/__init__.py#L140-L162
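The corresponding filter, sketched under the same assumption about digits:

from ipapy import remove_invalid_ipa_characters

valid, invalid = remove_invalid_ipa_characters(u"f42o", return_invalid=True)
print(valid)    # [u'f', u'o']
print(invalid)  # [u'4', u'2']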
pettarin/ipapy
ipapy/ipachar.py
variant_to_list
def variant_to_list(obj): """ Return a list containing the descriptors in the given object. The ``obj`` can be a list or a set of descriptor strings, or a Unicode string. If ``obj`` is a Unicode string, it will be split using spaces as delimiters. :param variant obj: the object to be parsed :rtype: list :raise TypeError: if the ``obj`` has a type not listed above """ if isinstance(obj, list): return obj elif is_unicode_string(obj): return [s for s in obj.split() if len(s) > 0] elif isinstance(obj, set) or isinstance(obj, frozenset): return list(obj) raise TypeError("The given value must be a list or a set of descriptor strings, or a Unicode string.")
python
def variant_to_list(obj): """ Return a list containing the descriptors in the given object. The ``obj`` can be a list or a set of descriptor strings, or a Unicode string. If ``obj`` is a Unicode string, it will be split using spaces as delimiters. :param variant obj: the object to be parsed :rtype: list :raise TypeError: if the ``obj`` has a type not listed above """ if isinstance(obj, list): return obj elif is_unicode_string(obj): return [s for s in obj.split() if len(s) > 0] elif isinstance(obj, set) or isinstance(obj, frozenset): return list(obj) raise TypeError("The given value must be a list or a set of descriptor strings, or a Unicode string.")
[ "def", "variant_to_list", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "list", ")", ":", "return", "obj", "elif", "is_unicode_string", "(", "obj", ")", ":", "return", "[", "s", "for", "s", "in", "obj", ".", "split", "(", ")", "if", "len", "(", "s", ")", ">", "0", "]", "elif", "isinstance", "(", "obj", ",", "set", ")", "or", "isinstance", "(", "obj", ",", "frozenset", ")", ":", "return", "list", "(", "obj", ")", "raise", "TypeError", "(", "\"The given value must be a list or a set of descriptor strings, or a Unicode string.\"", ")" ]
Return a list containing the descriptors in the given object. The ``obj`` can be a list or a set of descriptor strings, or a Unicode string. If ``obj`` is a Unicode string, it will be split using spaces as delimiters. :param variant obj: the object to be parsed :rtype: list :raise TypeError: if the ``obj`` has a type not listed above
[ "Return", "a", "list", "containing", "the", "descriptors", "in", "the", "given", "object", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/ipachar.py#L311-L329
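A sketch of the three accepted input shapes (the TypeError case is left commented so the snippet runs through):

from ipapy.ipachar import variant_to_list

print(variant_to_list(u"voiceless labiodental fricative consonant"))
# ['voiceless', 'labiodental', 'fricative', 'consonant']
print(variant_to_list({u"vowel", u"front"}))  # set input -> list (order unspecified)
# variant_to_list(42)  # would raise TypeError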
pettarin/ipapy
ipapy/ipachar.py
variant_to_canonical_string
def variant_to_canonical_string(obj): """ Return the canonical string for the given object. The ``obj`` can be a list or a set of descriptor strings, or a Unicode string. If ``obj`` is a Unicode string, it will be split using spaces as delimiters. :param variant obj: the object to be parsed :rtype: str :raise TypeError: if the ``obj`` has a type not listed above """ acc = [DG_ALL_DESCRIPTORS.canonical_value(p) for p in variant_to_list(obj)] acc = sorted([a for a in acc if a is not None]) return u" ".join(acc)
python
def variant_to_canonical_string(obj): """ Return the canonical string for the given object. The ``obj`` can be a list or a set of descriptor strings, or a Unicode string. If ``obj`` is a Unicode string, it will be split using spaces as delimiters. :param variant obj: the object to be parsed :rtype: str :raise TypeError: if the ``obj`` has a type not listed above """ acc = [DG_ALL_DESCRIPTORS.canonical_value(p) for p in variant_to_list(obj)] acc = sorted([a for a in acc if a is not None]) return u" ".join(acc)
[ "def", "variant_to_canonical_string", "(", "obj", ")", ":", "acc", "=", "[", "DG_ALL_DESCRIPTORS", ".", "canonical_value", "(", "p", ")", "for", "p", "in", "variant_to_list", "(", "obj", ")", "]", "acc", "=", "sorted", "(", "[", "a", "for", "a", "in", "acc", "if", "a", "is", "not", "None", "]", ")", "return", "u\" \"", ".", "join", "(", "acc", ")" ]
Return the canonical string for the given object. The ``obj`` can be a list or a set of descriptor strings, or a Unicode string. If ``obj`` is a Unicode string, it will be split using spaces as delimiters. :param variant obj: the object to be parsed :rtype: str :raise TypeError: if the ``obj`` has a type not listed above
[ "Return", "a", "list", "containing", "the", "canonical", "string", "for", "the", "given", "object", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/ipachar.py#L331-L345
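A sketch of the canonicalization; the exact labels come from the built-in descriptor groups, so the output shown is an assumption:

from ipapy.ipachar import variant_to_canonical_string

# canonical labels are sorted alphabetically and joined with spaces
print(variant_to_canonical_string(u"fricative voiceless labiodental consonant"))
# e.g. 'consonant fricative labiodental voiceless'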
pettarin/ipapy
ipapy/ipachar.py
is_list_of_ipachars
def is_list_of_ipachars(obj): """ Return ``True`` if the given object is a list of IPAChar objects. :param object obj: the object to test :rtype: bool """ if isinstance(obj, list): for e in obj: if not isinstance(e, IPAChar): return False return True return False
python
def is_list_of_ipachars(obj): """ Return ``True`` if the given object is a list of IPAChar objects. :param object obj: the object to test :rtype: bool """ if isinstance(obj, list): for e in obj: if not isinstance(e, IPAChar): return False return True return False
[ "def", "is_list_of_ipachars", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "list", ")", ":", "for", "e", "in", "obj", ":", "if", "not", "isinstance", "(", "e", ",", "IPAChar", ")", ":", "return", "False", "return", "True", "return", "False" ]
Return ``True`` if the given object is a list of IPAChar objects. :param object obj: the object to test :rtype: bool
[ "Return", "True", "if", "the", "given", "object", "is", "a", "list", "of", "IPAChar", "objects", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/ipachar.py#L508-L520
pettarin/ipapy
ipapy/ipachar.py
IPAChar.is_equivalent
def is_equivalent(self, other): """ Return ``True`` if the IPA character is equivalent to the ``other`` object. The ``other`` object can be: 1. a Unicode string, containing the representation of the IPA character, 2. a Unicode string, containing a space-separated list of descriptors, 3. a list of Unicode strings, containing descriptors, and 4. another IPAChar. :rtype: bool """ if (self.unicode_repr is not None) and (is_unicode_string(other)) and (self.unicode_repr == other): return True if isinstance(other, IPAChar): return self.canonical_representation == other.canonical_representation try: return self.canonical_representation == IPAChar(name=None, descriptors=other).canonical_representation except: return False
python
def is_equivalent(self, other): """ Return ``True`` if the IPA character is equivalent to the ``other`` object. The ``other`` object can be: 1. a Unicode string, containing the representation of the IPA character, 2. a Unicode string, containing a space-separated list of descriptors, 3. a list of Unicode strings, containing descriptors, and 4. another IPAChar. :rtype: bool """ if (self.unicode_repr is not None) and (is_unicode_string(other)) and (self.unicode_repr == other): return True if isinstance(other, IPAChar): return self.canonical_representation == other.canonical_representation try: return self.canonical_representation == IPAChar(name=None, descriptors=other).canonical_representation except: return False
[ "def", "is_equivalent", "(", "self", ",", "other", ")", ":", "if", "(", "self", ".", "unicode_repr", "is", "not", "None", ")", "and", "(", "is_unicode_string", "(", "other", ")", ")", "and", "(", "self", ".", "unicode_repr", "==", "other", ")", ":", "return", "True", "if", "isinstance", "(", "other", ",", "IPAChar", ")", ":", "return", "self", ".", "canonical_representation", "==", "other", ".", "canonical_representation", "try", ":", "return", "self", ".", "canonical_representation", "==", "IPAChar", "(", "name", "=", "None", ",", "descriptors", "=", "other", ")", ".", "canonical_representation", "except", ":", "return", "False" ]
Return ``True`` if the IPA character is equivalent to the ``other`` object. The ``other`` object can be: 1. a Unicode string, containing the representation of the IPA character, 2. a Unicode string, containing a space-separated list of descriptors, 3. a list of Unicode strings, containing descriptors, and 4. another IPAChar. :rtype: bool
[ "Return", "True", "if", "the", "IPA", "character", "is", "equivalent", "to", "the", "other", "object", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/ipachar.py#L403-L423
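A sketch of the comparison forms, assuming 'f' is stored with the descriptors shown here (as in the project README):

from ipapy import UNICODE_TO_IPA

c = UNICODE_TO_IPA[u"f"]
print(c.is_equivalent(u"f"))  # True: same Unicode representation
print(c.is_equivalent(u"voiceless labiodental fricative consonant"))  # True, if these are exactly its descriptors
print(c.is_equivalent(UNICODE_TO_IPA[u"v"]))  # False: different canonical representation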
pettarin/ipapy
ipapy/ipachar.py
IPAChar.dg_value
def dg_value(self, descriptor_group): """ Return the canonical value of a descriptor of the character, provided it is present in the given descriptor group. If not present, return ``None``. :param IPADescriptorGroup descriptor_group: the descriptor group to be checked against :rtype: str """ for p in self.descriptors: if p in descriptor_group: return descriptor_group.canonical_value(p) return None
python
def dg_value(self, descriptor_group): """ Return the canonical value of a descriptor of the character, provided it is present in the given descriptor group. If not present, return ``None``. :param IPADescriptorGroup descriptor_group: the descriptor group to be checked against :rtype: str """ for p in self.descriptors: if p in descriptor_group: return descriptor_group.canonical_value(p) return None
[ "def", "dg_value", "(", "self", ",", "descriptor_group", ")", ":", "for", "p", "in", "self", ".", "descriptors", ":", "if", "p", "in", "descriptor_group", ":", "return", "descriptor_group", ".", "canonical_value", "(", "p", ")", "return", "None" ]
Return the canonical value of a descriptor of the character, provided it is present in the given descriptor group. If not present, return ``None``. :param IPADescriptorGroup descriptor_group: the descriptor group to be checked against :rtype: str
[ "Return", "the", "canonical", "value", "of", "a", "descriptor", "of", "the", "character", "provided", "it", "is", "present", "in", "the", "given", "descriptor", "group", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/ipachar.py#L480-L493
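A sketch of dg_value, assuming DG_C_VOICING and DG_V_HEIGHT are importable from ipapy.ipachar (the setters below reference them) and 'voiceless' is the canonical voicing label:

from ipapy import UNICODE_TO_IPA
from ipapy.ipachar import DG_C_VOICING, DG_V_HEIGHT

c = UNICODE_TO_IPA[u"f"]
print(c.dg_value(DG_C_VOICING))  # e.g. 'voiceless'
print(c.dg_value(DG_V_HEIGHT))   # None: a consonant carries no vowel-height descriptor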
pettarin/ipapy
ipapy/ipachar.py
IPAChar.has_descriptor
def has_descriptor(self, descriptor): """ Return ``True`` if the character has the given descriptor. :param IPADescriptor descriptor: the descriptor to be checked against :rtype: bool """ for p in self.descriptors: if p in descriptor: return True return False
python
def has_descriptor(self, descriptor): """ Return ``True`` if the character has the given descriptor. :param IPADescriptor descriptor: the descriptor to be checked against :rtype: bool """ for p in self.descriptors: if p in descriptor: return True return False
[ "def", "has_descriptor", "(", "self", ",", "descriptor", ")", ":", "for", "p", "in", "self", ".", "descriptors", ":", "if", "p", "in", "descriptor", ":", "return", "True", "return", "False" ]
Return ``True`` if the character has the given descriptor. :param IPADescriptor descriptor: the descriptor to be checked against :rtype: bool
[ "Return", "True", "if", "the", "character", "has", "the", "given", "descriptor", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/ipachar.py#L495-L505
pettarin/ipapy
ipapy/ipachar.py
IPAConsonant.voicing
def voicing(self, value): """ Set the voicing of the consonant. :param str value: the value to be set """ if (value is not None) and (not value in DG_C_VOICING): raise ValueError("Unrecognized value for voicing: '%s'" % value) self.__voicing = value
python
def voicing(self, value): """ Set the voicing of the consonant. :param str value: the value to be set """ if (value is not None) and (not value in DG_C_VOICING): raise ValueError("Unrecognized value for voicing: '%s'" % value) self.__voicing = value
[ "def", "voicing", "(", "self", ",", "value", ")", ":", "if", "(", "value", "is", "not", "None", ")", "and", "(", "not", "value", "in", "DG_C_VOICING", ")", ":", "raise", "ValueError", "(", "\"Unrecognized value for voicing: '%s'\"", "%", "value", ")", "self", ".", "__voicing", "=", "value" ]
Set the voicing of the consonant. :param str value: the value to be set
[ "Set", "the", "voicing", "of", "the", "consonant", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/ipachar.py#L641-L649
pettarin/ipapy
ipapy/ipachar.py
IPAConsonant.place
def place(self, value): """ Set the place of articulation of the consonant. :param str value: the value to be set """ if (value is not None) and (not value in DG_C_PLACE): raise ValueError("Unrecognized value for place: '%s'" % value) self.__place = value
python
def place(self, value): """ Set the place of articulation of the consonant. :param str value: the value to be set """ if (value is not None) and (not value in DG_C_PLACE): raise ValueError("Unrecognized value for place: '%s'" % value) self.__place = value
[ "def", "place", "(", "self", ",", "value", ")", ":", "if", "(", "value", "is", "not", "None", ")", "and", "(", "not", "value", "in", "DG_C_PLACE", ")", ":", "raise", "ValueError", "(", "\"Unrecognized value for place: '%s'\"", "%", "value", ")", "self", ".", "__place", "=", "value" ]
Set the place of articulation of the consonant. :param str value: the value to be set
[ "Set", "the", "place", "of", "articulation", "of", "the", "consonant", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/ipachar.py#L660-L668
pettarin/ipapy
ipapy/ipachar.py
IPAConsonant.manner
def manner(self, value): """ Set the manner of articulation of the consonant. :param str value: the value to be set """ if (value is not None) and (not value in DG_C_MANNER): raise ValueError("Unrecognized value for manner: '%s'" % value) self.__manner = value
python
def manner(self, value): """ Set the manner of articulation of the consonant. :param str value: the value to be set """ if (value is not None) and (not value in DG_C_MANNER): raise ValueError("Unrecognized value for manner: '%s'" % value) self.__manner = value
[ "def", "manner", "(", "self", ",", "value", ")", ":", "if", "(", "value", "is", "not", "None", ")", "and", "(", "not", "value", "in", "DG_C_MANNER", ")", ":", "raise", "ValueError", "(", "\"Unrecognized value for manner: '%s'\"", "%", "value", ")", "self", ".", "__manner", "=", "value" ]
Set the manner of articulation of the consonant. :param str value: the value to be set
[ "Set", "the", "manner", "of", "articulation", "of", "the", "consonant", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/ipachar.py#L679-L687
pettarin/ipapy
ipapy/ipachar.py
IPAVowel.height
def height(self, value): """ Set the height of the vowel. :param str value: the value to be set """ if (value is not None) and (not value in DG_V_HEIGHT): raise ValueError("Unrecognized value for height: '%s'" % value) self.__height = value
python
def height(self, value): """ Set the height of the vowel. :param str value: the value to be set """ if (value is not None) and (not value in DG_V_HEIGHT): raise ValueError("Unrecognized value for height: '%s'" % value) self.__height = value
[ "def", "height", "(", "self", ",", "value", ")", ":", "if", "(", "value", "is", "not", "None", ")", "and", "(", "not", "value", "in", "DG_V_HEIGHT", ")", ":", "raise", "ValueError", "(", "\"Unrecognized value for height: '%s'\"", "%", "value", ")", "self", ".", "__height", "=", "value" ]
Set the height of the vowel. :param str value: the value to be set
[ "Set", "the", "height", "of", "the", "vowel", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/ipachar.py#L778-L786
pettarin/ipapy
ipapy/ipachar.py
IPAVowel.backness
def backness(self, value): """ Set the backness of the vowel. :param str value: the value to be set """ if (value is not None) and (not value in DG_V_BACKNESS): raise ValueError("Unrecognized value for backness: '%s'" % value) self.__backness = value
python
def backness(self, value): """ Set the backness of the vowel. :param str value: the value to be set """ if (value is not None) and (not value in DG_V_BACKNESS): raise ValueError("Unrecognized value for backness: '%s'" % value) self.__backness = value
[ "def", "backness", "(", "self", ",", "value", ")", ":", "if", "(", "value", "is", "not", "None", ")", "and", "(", "not", "value", "in", "DG_V_BACKNESS", ")", ":", "raise", "ValueError", "(", "\"Unrecognized value for backness: '%s'\"", "%", "value", ")", "self", ".", "__backness", "=", "value" ]
Set the backness of the vowel. :param str value: the value to be set
[ "Set", "the", "backness", "of", "the", "vowel", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/ipachar.py#L797-L805
pettarin/ipapy
ipapy/ipachar.py
IPAVowel.roundness
def roundness(self, value): """ Set the roundness of the vowel. :param str value: the value to be set """ if (value is not None) and (not value in DG_V_ROUNDNESS): raise ValueError("Unrecognized value for roundness: '%s'" % value) self.__roundness = value
python
def roundness(self, value): """ Set the roundness of the vowel. :param str value: the value to be set """ if (value is not None) and (not value in DG_V_ROUNDNESS): raise ValueError("Unrecognized value for roundness: '%s'" % value) self.__roundness = value
[ "def", "roundness", "(", "self", ",", "value", ")", ":", "if", "(", "value", "is", "not", "None", ")", "and", "(", "not", "value", "in", "DG_V_ROUNDNESS", ")", ":", "raise", "ValueError", "(", "\"Unrecognized value for roundness: '%s'\"", "%", "value", ")", "self", ".", "__roundness", "=", "value" ]
Set the roundness of the vowel. :param str value: the value to be set
[ "Set", "the", "roundness", "of", "the", "vowel", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/ipachar.py#L816-L824
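All six setters above (voicing/place/manner on IPAConsonant, height/backness/roundness on IPAVowel) follow the same validate-then-assign pattern; a sketch with a vowel, assuming these descriptor strings are in the built-in groups:

from ipapy.ipachar import IPAVowel

v = IPAVowel(name="my e", descriptors=u"close-mid front unrounded vowel")
v.height = u"open"  # accepted: a known height value
try:
    v.height = u"sideways"  # not in DG_V_HEIGHT
except ValueError as e:
    print(e)  # Unrecognized value for height: 'sideways'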
pettarin/ipapy
ipapy/kirshenbaummapper.py
KirshenbaumMapper._load_data
def _load_data(self): """ Load the Kirshenbaum ASCII IPA data from the built-in database. """ ipa_canonical_string_to_ascii_str = dict() for line in load_data_file( file_path=self.DATA_FILE_PATH, file_path_is_relative=True, line_format=u"sxA" ): i_desc, i_ascii = line if len(i_ascii) == 0: raise ValueError("Data file '%s' contains a bad line: '%s'" % (self.DATA_FILE_PATH, line)) key = (variant_to_canonical_string(i_desc),) ipa_canonical_string_to_ascii_str[key] = i_ascii[0] return ipa_canonical_string_to_ascii_str
python
def _load_data(self): """ Load the Kirshenbaum ASCII IPA data from the built-in database. """ ipa_canonical_string_to_ascii_str = dict() for line in load_data_file( file_path=self.DATA_FILE_PATH, file_path_is_relative=True, line_format=u"sxA" ): i_desc, i_ascii = line if len(i_ascii) == 0: raise ValueError("Data file '%s' contains a bad line: '%s'" % (self.DATA_FILE_PATH, line)) key = (variant_to_canonical_string(i_desc),) ipa_canonical_string_to_ascii_str[key] = i_ascii[0] return ipa_canonical_string_to_ascii_str
[ "def", "_load_data", "(", "self", ")", ":", "ipa_canonical_string_to_ascii_str", "=", "dict", "(", ")", "for", "line", "in", "load_data_file", "(", "file_path", "=", "self", ".", "DATA_FILE_PATH", ",", "file_path_is_relative", "=", "True", ",", "line_format", "=", "u\"sxA\"", ")", ":", "i_desc", ",", "i_ascii", "=", "line", "if", "len", "(", "i_ascii", ")", "==", "0", ":", "raise", "ValueError", "(", "\"Data file '%s' contains a bad line: '%s'\"", "%", "(", "self", ".", "DATA_FILE_PATH", ",", "line", ")", ")", "key", "=", "(", "variant_to_canonical_string", "(", "i_desc", ")", ",", ")", "ipa_canonical_string_to_ascii_str", "[", "key", "]", "=", "i_ascii", "[", "0", "]", "return", "ipa_canonical_string_to_ascii_str" ]
Load the Kirshenbaum ASCII IPA data from the built-in database.
[ "Load", "the", "Kirshenbaum", "ASCII", "IPA", "data", "from", "the", "built", "-", "in", "database", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/kirshenbaummapper.py#L29-L44
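A sketch of the mapper built on this data, assuming KirshenbaumMapper exposes a map_unicode_string method as shown in the project README (the exact ASCII output depends on the data file):

from ipapy.kirshenbaummapper import KirshenbaumMapper

kmapper = KirshenbaumMapper()  # calls _load_data() on the built-in kirshenbaum.dat
print(kmapper.map_unicode_string(u"fo"))  # ASCII (Kirshenbaum) transliteration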
pettarin/ipapy
ipapy/ipadescriptor.py
IPADescriptorGroup.canonical_value
def canonical_value(self, query): """ Return the canonical value corresponding to the given query value. Return ``None`` if the query value is not present in any descriptor of the group. :param str query: the descriptor value to be checked against """ for d in self.descriptors: if query in d: return d.canonical_label return None
python
def canonical_value(self, query): """ Return the canonical value corresponding to the given query value. Return ``None`` if the query value is not present in any descriptor of the group. :param str query: the descriptor value to be checked against """ for d in self.descriptors: if query in d: return d.canonical_label return None
[ "def", "canonical_value", "(", "self", ",", "query", ")", ":", "for", "d", "in", "self", ".", "descriptors", ":", "if", "query", "in", "d", ":", "return", "d", ".", "canonical_label", "return", "None" ]
Return the canonical value corresponding to the given query value. Return ``None`` if the query value is not present in any descriptor of the group. :param str query: the descriptor value to be checked against
[ "Return", "the", "canonical", "value", "corresponding", "to", "the", "given", "query", "value", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/ipadescriptor.py#L133-L144
pettarin/ipapy
ipapy/arpabetmapper.py
ARPABETMapper._load_data
def _load_data(self): """ Load the ARPABET ASCII IPA data from the built-in database. """ ipa_canonical_string_to_ascii_str = dict() for line in load_data_file( file_path=self.DATA_FILE_PATH, file_path_is_relative=True, line_format=u"UA" ): i_unicode, i_ascii = line if (len(i_unicode) == 0) or (len(i_ascii) == 0): raise ValueError("Data file '%s' contains a bad line: '%s'" % (self.DATA_FILE_PATH, line)) i_unicode = i_unicode[0] i_ascii = i_ascii[0] key = tuple([UNICODE_TO_IPA[c].canonical_representation for c in i_unicode]) ipa_canonical_string_to_ascii_str[key] = i_ascii return ipa_canonical_string_to_ascii_str
python
def _load_data(self): """ Load the ARPABET ASCII IPA data from the built-in database. """ ipa_canonical_string_to_ascii_str = dict() for line in load_data_file( file_path=self.DATA_FILE_PATH, file_path_is_relative=True, line_format=u"UA" ): i_unicode, i_ascii = line if (len(i_unicode) == 0) or (len(i_ascii) == 0): raise ValueError("Data file '%s' contains a bad line: '%s'" % (self.DATA_FILE_PATH, line)) i_unicode = i_unicode[0] i_ascii = i_ascii[0] key = tuple([UNICODE_TO_IPA[c].canonical_representation for c in i_unicode]) ipa_canonical_string_to_ascii_str[key] = i_ascii return ipa_canonical_string_to_ascii_str
[ "def", "_load_data", "(", "self", ")", ":", "ipa_canonical_string_to_ascii_str", "=", "dict", "(", ")", "for", "line", "in", "load_data_file", "(", "file_path", "=", "self", ".", "DATA_FILE_PATH", ",", "file_path_is_relative", "=", "True", ",", "line_format", "=", "u\"UA\"", ")", ":", "i_unicode", ",", "i_ascii", "=", "line", "if", "(", "len", "(", "i_unicode", ")", "==", "0", ")", "or", "(", "len", "(", "i_ascii", ")", "==", "0", ")", ":", "raise", "ValueError", "(", "\"Data file '%s' contains a bad line: '%s'\"", "%", "(", "self", ".", "DATA_FILE_PATH", ",", "line", ")", ")", "i_unicode", "=", "i_unicode", "[", "0", "]", "i_ascii", "=", "i_ascii", "[", "0", "]", "key", "=", "tuple", "(", "[", "UNICODE_TO_IPA", "[", "c", "]", ".", "canonical_representation", "for", "c", "in", "i_unicode", "]", ")", "ipa_canonical_string_to_ascii_str", "[", "key", "]", "=", "i_ascii", "return", "ipa_canonical_string_to_ascii_str" ]
Load the ARPABET ASCII IPA data from the built-in database.
[ "Load", "the", "ARPABET", "ASCII", "IPA", "data", "from", "the", "built", "-", "in", "database", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/arpabetmapper.py#L29-L46
pettarin/ipapy
ipapy/compatibility.py
is_unicode_string
def is_unicode_string(string): """ Return ``True`` if the given string is a Unicode string, that is, of type ``unicode`` in Python 2 or ``str`` in Python 3. Return ``None`` if ``string`` is ``None``. :param str string: the string to be checked :rtype: bool """ if string is None: return None if PY2: return isinstance(string, unicode) return isinstance(string, str)
python
def is_unicode_string(string): """ Return ``True`` if the given string is a Unicode string, that is, of type ``unicode`` in Python 2 or ``str`` in Python 3. Return ``None`` if ``string`` is ``None``. :param str string: the string to be checked :rtype: bool """ if string is None: return None if PY2: return isinstance(string, unicode) return isinstance(string, str)
[ "def", "is_unicode_string", "(", "string", ")", ":", "if", "string", "is", "None", ":", "return", "None", "if", "PY2", ":", "return", "isinstance", "(", "string", ",", "unicode", ")", "return", "isinstance", "(", "string", ",", "str", ")" ]
Return ``True`` if the given string is a Unicode string, that is, of type ``unicode`` in Python 2 or ``str`` in Python 3. Return ``None`` if ``string`` is ``None``. :param str string: the string to be checked :rtype: bool
[ "Return", "True", "if", "the", "given", "string", "is", "a", "Unicode", "string", "that", "is", "of", "type", "unicode", "in", "Python", "2", "or", "str", "in", "Python", "3", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/compatibility.py#L25-L39
pettarin/ipapy
ipapy/compatibility.py
to_unicode_string
def to_unicode_string(string): """ Return a Unicode string out of the given string. On Python 2, it calls ``unicode`` with ``utf-8`` encoding. On Python 3, it just returns the given string. Return ``None`` if ``string`` is ``None``. :param str string: the string to convert to Unicode :rtype: (Unicode) str """ if string is None: return None if is_unicode_string(string): return string # if reached here, string is a byte string if PY2: return unicode(string, encoding="utf-8") return string.decode(encoding="utf-8")
python
def to_unicode_string(string): """ Return a Unicode string out of the given string. On Python 2, it calls ``unicode`` with ``utf-8`` encoding. On Python 3, it just returns the given string. Return ``None`` if ``string`` is ``None``. :param str string: the string to convert to Unicode :rtype: (Unicode) str """ if string is None: return None if is_unicode_string(string): return string # if reached here, string is a byte string if PY2: return unicode(string, encoding="utf-8") return string.decode(encoding="utf-8")
[ "def", "to_unicode_string", "(", "string", ")", ":", "if", "string", "is", "None", ":", "return", "None", "if", "is_unicode_string", "(", "string", ")", ":", "return", "string", "# if reached here, string is a byte string ", "if", "PY2", ":", "return", "unicode", "(", "string", ",", "encoding", "=", "\"utf-8\"", ")", "return", "string", ".", "decode", "(", "encoding", "=", "\"utf-8\"", ")" ]
Return a Unicode string out of the given string. On Python 2, it calls ``unicode`` with ``utf-8`` encoding. On Python 3, it just returns the given string. Return ``None`` if ``string`` is ``None``. :param str string: the string to convert to Unicode :rtype: (Unicode) str
[ "Return", "a", "Unicode", "string", "out", "of", "the", "given", "string", ".", "On", "Python", "2", "it", "calls", "unicode", "with", "utf", "-", "8", "encoding", ".", "On", "Python", "3", "it", "just", "returns", "the", "given", "string", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/compatibility.py#L41-L60
pettarin/ipapy
ipapy/compatibility.py
to_str
def to_str(string): """ Return the given string (either byte string or Unicode string) converted to native-str, that is, a byte string on Python 2, or a Unicode string on Python 3. Return ``None`` if ``string`` is ``None``. :param str string: the string to convert to native-str :rtype: native-str """ if string is None: return None if isinstance(string, str): return string if PY2: return string.encode("utf-8") return string.decode("utf-8")
python
def to_str(string): """ Return the given string (either byte string or Unicode string) converted to native-str, that is, a byte string on Python 2, or a Unicode string on Python 3. Return ``None`` if ``string`` is ``None``. :param str string: the string to convert to native-str :rtype: native-str """ if string is None: return None if isinstance(string, str): return string if PY2: return string.encode("utf-8") return string.decode("utf-8")
[ "def", "to_str", "(", "string", ")", ":", "if", "string", "is", "None", ":", "return", "None", "if", "isinstance", "(", "string", ",", "str", ")", ":", "return", "string", "if", "PY2", ":", "return", "string", ".", "encode", "(", "\"utf-8\"", ")", "return", "string", ".", "decode", "(", "\"utf-8\"", ")" ]
Return the given string (either byte string or Unicode string) converted to native-str, that is, a byte string on Python 2, or a Unicode string on Python 3. Return ``None`` if ``string`` is ``None``. :param str string: the string to convert to native-str :rtype: native-str
[ "Return", "the", "given", "string", "(", "either", "byte", "string", "or", "Unicode", "string", ")", "converted", "to", "native", "-", "str", "that", "is", "a", "byte", "string", "on", "Python", "2", "or", "a", "Unicode", "string", "on", "Python", "3", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/compatibility.py#L62-L79
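The three compatibility helpers round each other out; a quick sketch:

from ipapy.compatibility import is_unicode_string, to_unicode_string, to_str

print(is_unicode_string(u"abc"))          # True on both Python 2 and 3
print(to_unicode_string(b"caf\xc3\xa9"))  # UTF-8 bytes decoded: 'café'
print(to_str(u"café"))                    # native str: UTF-8 bytes on Py2, unchanged on Py3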
pettarin/ipapy
ipapy/compatibility.py
hex_to_unichr
def hex_to_unichr(hex_string): """ Return the Unicode character with the given codepoint, given as an hexadecimal string. Return ``None`` if ``hex_string`` is ``None`` or is empty. Example:: "0061" => a "U+0061" => a :param str hex_string: the Unicode codepoint of the desired character :rtype: (Unicode) str """ if (hex_string is None) or (len(hex_string) < 1): return None if hex_string.startswith("U+"): hex_string = hex_string[2:] return int_to_unichr(int(hex_string, base=16))
python
def hex_to_unichr(hex_string): """ Return the Unicode character with the given codepoint, given as an hexadecimal string. Return ``None`` if ``hex_string`` is ``None`` or is empty. Example:: "0061" => a "U+0061" => a :param str hex_string: the Unicode codepoint of the desired character :rtype: (Unicode) str """ if (hex_string is None) or (len(hex_string) < 1): return None if hex_string.startswith("U+"): hex_string = hex_string[2:] return int_to_unichr(int(hex_string, base=16))
[ "def", "hex_to_unichr", "(", "hex_string", ")", ":", "if", "(", "hex_string", "is", "None", ")", "or", "(", "len", "(", "hex_string", ")", "<", "1", ")", ":", "return", "None", "if", "hex_string", ".", "startswith", "(", "\"U+\"", ")", ":", "hex_string", "=", "hex_string", "[", "2", ":", "]", "return", "int_to_unichr", "(", "int", "(", "hex_string", ",", "base", "=", "16", ")", ")" ]
Return the Unicode character with the given codepoint, given as an hexadecimal string. Return ``None`` if ``hex_string`` is ``None`` or is empty. Example:: "0061" => a "U+0061" => a :param str hex_string: the Unicode codepoint of the desired character :rtype: (Unicode) str
[ "Return", "the", "Unicode", "character", "with", "the", "given", "codepoint", "given", "as", "an", "hexadecimal", "string", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/compatibility.py#L96-L114
pettarin/ipapy
ipapy/compatibility.py
unicode_to_hex
def unicode_to_hex(unicode_string): """ Return a string containing the Unicode hexadecimal codepoint of each Unicode character in the given Unicode string. Return ``None`` if ``unicode_string`` is ``None``. Example:: a => U+0061 ab => U+0061 U+0062 :param str unicode_string: the Unicode string to convert :rtype: (Unicode) str """ if unicode_string is None: return None acc = [] for c in unicode_string: s = hex(ord(c)).replace("0x", "").upper() acc.append("U+" + ("0" * (4 - len(s))) + s) return u" ".join(acc)
python
def unicode_to_hex(unicode_string): """ Return a string containing the Unicode hexadecimal codepoint of each Unicode character in the given Unicode string. Return ``None`` if ``unicode_string`` is ``None``. Example:: a => U+0061 ab => U+0061 U+0062 :param str unicode_string: the Unicode string to convert :rtype: (Unicode) str """ if unicode_string is None: return None acc = [] for c in unicode_string: s = hex(ord(c)).replace("0x", "").upper() acc.append("U+" + ("0" * (4 - len(s))) + s) return u" ".join(acc)
[ "def", "unicode_to_hex", "(", "unicode_string", ")", ":", "if", "unicode_string", "is", "None", ":", "return", "None", "acc", "=", "[", "]", "for", "c", "in", "unicode_string", ":", "s", "=", "hex", "(", "ord", "(", "c", ")", ")", ".", "replace", "(", "\"0x\"", ",", "\"\"", ")", ".", "upper", "(", ")", "acc", ".", "append", "(", "\"U+\"", "+", "(", "\"0\"", "*", "(", "4", "-", "len", "(", "s", ")", ")", ")", "+", "s", ")", "return", "u\" \"", ".", "join", "(", "acc", ")" ]
Return a string containing the Unicode hexadecimal codepoint of each Unicode character in the given Unicode string. Return ``None`` if ``unicode_string`` is ``None``. Example:: a => U+0061 ab => U+0061 U+0062 :param str unicode_string: the Unicode string to convert :rtype: (Unicode) str
[ "Return", "a", "string", "containing", "the", "Unicode", "hexadecimal", "codepoint", "of", "each", "Unicode", "character", "in", "the", "given", "Unicode", "string", "." ]
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/compatibility.py#L116-L136
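The two codepoint helpers are inverses of each other; a round-trip sketch:

from ipapy.compatibility import hex_to_unichr, unicode_to_hex

print(hex_to_unichr("U+0061"))  # 'a' (the 'U+' prefix is optional)
print(hex_to_unichr("0061"))    # 'a'
print(unicode_to_hex(u"ab"))    # 'U+0061 U+0062'
print(unicode_to_hex(hex_to_unichr("U+0361")))  # round trip: 'U+0361'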
TeamHG-Memex/html-text
html_text/html_text.py
parse_html
def parse_html(html): """ Create an lxml.html.HtmlElement from a string with html. XXX: mostly copy-pasted from parsel.selector.create_root_node """ body = html.strip().replace('\x00', '').encode('utf8') or b'<html/>' parser = lxml.html.HTMLParser(recover=True, encoding='utf8') root = lxml.etree.fromstring(body, parser=parser) if root is None: root = lxml.etree.fromstring(b'<html/>', parser=parser) return root
python
def parse_html(html): """ Create an lxml.html.HtmlElement from a string with html. XXX: mostly copy-pasted from parsel.selector.create_root_node """ body = html.strip().replace('\x00', '').encode('utf8') or b'<html/>' parser = lxml.html.HTMLParser(recover=True, encoding='utf8') root = lxml.etree.fromstring(body, parser=parser) if root is None: root = lxml.etree.fromstring(b'<html/>', parser=parser) return root
[ "def", "parse_html", "(", "html", ")", ":", "body", "=", "html", ".", "strip", "(", ")", ".", "replace", "(", "'\\x00'", ",", "''", ")", ".", "encode", "(", "'utf8'", ")", "or", "b'<html/>'", "parser", "=", "lxml", ".", "html", ".", "HTMLParser", "(", "recover", "=", "True", ",", "encoding", "=", "'utf8'", ")", "root", "=", "lxml", ".", "etree", ".", "fromstring", "(", "body", ",", "parser", "=", "parser", ")", "if", "root", "is", "None", ":", "root", "=", "lxml", ".", "etree", ".", "fromstring", "(", "b'<html/>'", ",", "parser", "=", "parser", ")", "return", "root" ]
Create an lxml.html.HtmlElement from a string with html. XXX: mostly copy-pasted from parsel.selector.create_root_node
[ "Create", "an", "lxml", ".", "html", ".", "HtmlElement", "from", "a", "string", "with", "html", ".", "XXX", ":", "mostly", "copy", "-", "pasted", "from", "parsel", ".", "selector", ".", "create_root_node" ]
train
https://github.com/TeamHG-Memex/html-text/blob/871d4dbe9f4f99e5f041110c60458adcaae6fab4/html_text/html_text.py#L45-L54
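A sketch of the recovering parser, assuming parse_html is re-exported at package level as the html-text README suggests:

from html_text import parse_html

root = parse_html(u"<p>Hello <b>world</b></p>")
print(root.tag)                  # 'html' -- the fragment is wrapped in a full document
print(root.xpath('//b/text()'))  # ['world']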
TeamHG-Memex/html-text
html_text/html_text.py
etree_to_text
def etree_to_text(tree, guess_punct_space=True, guess_layout=True, newline_tags=NEWLINE_TAGS, double_newline_tags=DOUBLE_NEWLINE_TAGS): """ Convert a html tree to text. Tree should be cleaned with ``html_text.html_text.cleaner.clean_html`` before passing to this function. See html_text.extract_text docstring for description of the approach and options. """ chunks = [] _NEWLINE = object() _DOUBLE_NEWLINE = object() class Context: """ workaround for missing `nonlocal` in Python 2 """ # _NEWLINE, _DOUBLE_NEWLINE or content of the previous chunk (str) prev = _DOUBLE_NEWLINE def should_add_space(text, prev): """ Return True if extra whitespace should be added before text """ if prev in {_NEWLINE, _DOUBLE_NEWLINE}: return False if not _has_trailing_whitespace(prev): if _has_punct_after(text) or _has_open_bracket_before(prev): return False return True def get_space_between(text, prev): if not text or not guess_punct_space: return ' ' return ' ' if should_add_space(text, prev) else '' def add_newlines(tag, context): if not guess_layout: return prev = context.prev if prev is _DOUBLE_NEWLINE: # don't output more than 1 blank line return if tag in double_newline_tags: context.prev = _DOUBLE_NEWLINE chunks.append('\n' if prev is _NEWLINE else '\n\n') elif tag in newline_tags: context.prev = _NEWLINE if prev is not _NEWLINE: chunks.append('\n') def add_text(text_content, context): text = _normalize_whitespace(text_content) if text_content else '' if not text: return space = get_space_between(text, context.prev) chunks.extend([space, text]) context.prev = text_content def traverse_text_fragments(tree, context, handle_tail=True): """ Extract text from the ``tree``: fill ``chunks`` variable """ add_newlines(tree.tag, context) add_text(tree.text, context) for child in tree: traverse_text_fragments(child, context) add_newlines(tree.tag, context) if handle_tail: add_text(tree.tail, context) traverse_text_fragments(tree, context=Context(), handle_tail=False) return ''.join(chunks).strip()
python
def etree_to_text(tree, guess_punct_space=True, guess_layout=True, newline_tags=NEWLINE_TAGS, double_newline_tags=DOUBLE_NEWLINE_TAGS): """ Convert a html tree to text. Tree should be cleaned with ``html_text.html_text.cleaner.clean_html`` before passing to this function. See html_text.extract_text docstring for description of the approach and options. """ chunks = [] _NEWLINE = object() _DOUBLE_NEWLINE = object() class Context: """ workaround for missing `nonlocal` in Python 2 """ # _NEWLINE, _DOUBLE_NEWLINE or content of the previous chunk (str) prev = _DOUBLE_NEWLINE def should_add_space(text, prev): """ Return True if extra whitespace should be added before text """ if prev in {_NEWLINE, _DOUBLE_NEWLINE}: return False if not _has_trailing_whitespace(prev): if _has_punct_after(text) or _has_open_bracket_before(prev): return False return True def get_space_between(text, prev): if not text or not guess_punct_space: return ' ' return ' ' if should_add_space(text, prev) else '' def add_newlines(tag, context): if not guess_layout: return prev = context.prev if prev is _DOUBLE_NEWLINE: # don't output more than 1 blank line return if tag in double_newline_tags: context.prev = _DOUBLE_NEWLINE chunks.append('\n' if prev is _NEWLINE else '\n\n') elif tag in newline_tags: context.prev = _NEWLINE if prev is not _NEWLINE: chunks.append('\n') def add_text(text_content, context): text = _normalize_whitespace(text_content) if text_content else '' if not text: return space = get_space_between(text, context.prev) chunks.extend([space, text]) context.prev = text_content def traverse_text_fragments(tree, context, handle_tail=True): """ Extract text from the ``tree``: fill ``chunks`` variable """ add_newlines(tree.tag, context) add_text(tree.text, context) for child in tree: traverse_text_fragments(child, context) add_newlines(tree.tag, context) if handle_tail: add_text(tree.tail, context) traverse_text_fragments(tree, context=Context(), handle_tail=False) return ''.join(chunks).strip()
[ "def", "etree_to_text", "(", "tree", ",", "guess_punct_space", "=", "True", ",", "guess_layout", "=", "True", ",", "newline_tags", "=", "NEWLINE_TAGS", ",", "double_newline_tags", "=", "DOUBLE_NEWLINE_TAGS", ")", ":", "chunks", "=", "[", "]", "_NEWLINE", "=", "object", "(", ")", "_DOUBLE_NEWLINE", "=", "object", "(", ")", "class", "Context", ":", "\"\"\" workaround for missing `nonlocal` in Python 2 \"\"\"", "# _NEWLINE, _DOUBLE_NEWLINE or content of the previous chunk (str)", "prev", "=", "_DOUBLE_NEWLINE", "def", "should_add_space", "(", "text", ",", "prev", ")", ":", "\"\"\" Return True if extra whitespace should be added before text \"\"\"", "if", "prev", "in", "{", "_NEWLINE", ",", "_DOUBLE_NEWLINE", "}", ":", "return", "False", "if", "not", "_has_trailing_whitespace", "(", "prev", ")", ":", "if", "_has_punct_after", "(", "text", ")", "or", "_has_open_bracket_before", "(", "prev", ")", ":", "return", "False", "return", "True", "def", "get_space_between", "(", "text", ",", "prev", ")", ":", "if", "not", "text", "or", "not", "guess_punct_space", ":", "return", "' '", "return", "' '", "if", "should_add_space", "(", "text", ",", "prev", ")", "else", "''", "def", "add_newlines", "(", "tag", ",", "context", ")", ":", "if", "not", "guess_layout", ":", "return", "prev", "=", "context", ".", "prev", "if", "prev", "is", "_DOUBLE_NEWLINE", ":", "# don't output more than 1 blank line", "return", "if", "tag", "in", "double_newline_tags", ":", "context", ".", "prev", "=", "_DOUBLE_NEWLINE", "chunks", ".", "append", "(", "'\\n'", "if", "prev", "is", "_NEWLINE", "else", "'\\n\\n'", ")", "elif", "tag", "in", "newline_tags", ":", "context", ".", "prev", "=", "_NEWLINE", "if", "prev", "is", "not", "_NEWLINE", ":", "chunks", ".", "append", "(", "'\\n'", ")", "def", "add_text", "(", "text_content", ",", "context", ")", ":", "text", "=", "_normalize_whitespace", "(", "text_content", ")", "if", "text_content", "else", "''", "if", "not", "text", ":", "return", "space", "=", "get_space_between", "(", "text", ",", "context", ".", "prev", ")", "chunks", ".", "extend", "(", "[", "space", ",", "text", "]", ")", "context", ".", "prev", "=", "text_content", "def", "traverse_text_fragments", "(", "tree", ",", "context", ",", "handle_tail", "=", "True", ")", ":", "\"\"\" Extract text from the ``tree``: fill ``chunks`` variable \"\"\"", "add_newlines", "(", "tree", ".", "tag", ",", "context", ")", "add_text", "(", "tree", ".", "text", ",", "context", ")", "for", "child", "in", "tree", ":", "traverse_text_fragments", "(", "child", ",", "context", ")", "add_newlines", "(", "tree", ".", "tag", ",", "context", ")", "if", "handle_tail", ":", "add_text", "(", "tree", ".", "tail", ",", "context", ")", "traverse_text_fragments", "(", "tree", ",", "context", "=", "Context", "(", ")", ",", "handle_tail", "=", "False", ")", "return", "''", ".", "join", "(", "chunks", ")", ".", "strip", "(", ")" ]
Convert a html tree to text. Tree should be cleaned with ``html_text.html_text.cleaner.clean_html`` before passing to this function. See html_text.extract_text docstring for description of the approach and options.
[ "Convert", "a", "html", "tree", "to", "text", ".", "Tree", "should", "be", "cleaned", "with", "html_text", ".", "html_text", ".", "cleaner", ".", "clean_html", "before", "passing", "to", "this", "function", "." ]
train
https://github.com/TeamHG-Memex/html-text/blob/871d4dbe9f4f99e5f041110c60458adcaae6fab4/html_text/html_text.py#L67-L137
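A sketch of the layout heuristics on clean input (no styles or scripts, so skipping the cleaner is harmless here):

from html_text import parse_html, etree_to_text

tree = parse_html(u"<h1>Title</h1><p>Hello <span>world</span>!</p>")
print(etree_to_text(tree))
# Title
#
# Hello world!
# (h1 and p trigger blank lines; no space is guessed before '!')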
TeamHG-Memex/html-text
html_text/html_text.py
selector_to_text
def selector_to_text(sel, guess_punct_space=True, guess_layout=True): """ Convert a cleaned parsel.Selector to text. See html_text.extract_text docstring for description of the approach and options. """ import parsel if isinstance(sel, parsel.SelectorList): # if selecting a specific xpath text = [] for s in sel: extracted = etree_to_text( s.root, guess_punct_space=guess_punct_space, guess_layout=guess_layout) if extracted: text.append(extracted) return ' '.join(text) else: return etree_to_text( sel.root, guess_punct_space=guess_punct_space, guess_layout=guess_layout)
python
def selector_to_text(sel, guess_punct_space=True, guess_layout=True): """ Convert a cleaned parsel.Selector to text. See html_text.extract_text docstring for description of the approach and options. """ import parsel if isinstance(sel, parsel.SelectorList): # if selecting a specific xpath text = [] for s in sel: extracted = etree_to_text( s.root, guess_punct_space=guess_punct_space, guess_layout=guess_layout) if extracted: text.append(extracted) return ' '.join(text) else: return etree_to_text( sel.root, guess_punct_space=guess_punct_space, guess_layout=guess_layout)
[ "def", "selector_to_text", "(", "sel", ",", "guess_punct_space", "=", "True", ",", "guess_layout", "=", "True", ")", ":", "import", "parsel", "if", "isinstance", "(", "sel", ",", "parsel", ".", "SelectorList", ")", ":", "# if selecting a specific xpath", "text", "=", "[", "]", "for", "s", "in", "sel", ":", "extracted", "=", "etree_to_text", "(", "s", ".", "root", ",", "guess_punct_space", "=", "guess_punct_space", ",", "guess_layout", "=", "guess_layout", ")", "if", "extracted", ":", "text", ".", "append", "(", "extracted", ")", "return", "' '", ".", "join", "(", "text", ")", "else", ":", "return", "etree_to_text", "(", "sel", ".", "root", ",", "guess_punct_space", "=", "guess_punct_space", ",", "guess_layout", "=", "guess_layout", ")" ]
Convert a cleaned parsel.Selector to text. See html_text.extract_text docstring for description of the approach and options.
[ "Convert", "a", "cleaned", "parsel", ".", "Selector", "to", "text", ".", "See", "html_text", ".", "extract_text", "docstring", "for", "description", "of", "the", "approach", "and", "options", "." ]
train
https://github.com/TeamHG-Memex/html-text/blob/871d4dbe9f4f99e5f041110c60458adcaae6fab4/html_text/html_text.py#L140-L161
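A hedged sketch of the two branches above, assuming parsel is installed; cleaned_selector (the next record) builds the pre-cleaned selector that selector_to_text expects, and the markup is made up.

from html_text.html_text import cleaned_selector, selector_to_text

sel = cleaned_selector('<div><p>one</p></div><div><p>two</p></div>')
print(selector_to_text(sel.css('div')))  # SelectorList branch: per-node texts joined by spaces -> 'one two'
print(selector_to_text(sel))             # single-Selector branch: text of the whole tree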
TeamHG-Memex/html-text
html_text/html_text.py
cleaned_selector
def cleaned_selector(html): """ Clean parsel.selector. """ import parsel try: tree = _cleaned_html_tree(html) sel = parsel.Selector(root=tree, type='html') except (lxml.etree.XMLSyntaxError, lxml.etree.ParseError, lxml.etree.ParserError, UnicodeEncodeError): # likely plain text sel = parsel.Selector(html) return sel
python
def cleaned_selector(html): """ Clean parsel.selector. """ import parsel try: tree = _cleaned_html_tree(html) sel = parsel.Selector(root=tree, type='html') except (lxml.etree.XMLSyntaxError, lxml.etree.ParseError, lxml.etree.ParserError, UnicodeEncodeError): # likely plain text sel = parsel.Selector(html) return sel
[ "def", "cleaned_selector", "(", "html", ")", ":", "import", "parsel", "try", ":", "tree", "=", "_cleaned_html_tree", "(", "html", ")", "sel", "=", "parsel", ".", "Selector", "(", "root", "=", "tree", ",", "type", "=", "'html'", ")", "except", "(", "lxml", ".", "etree", ".", "XMLSyntaxError", ",", "lxml", ".", "etree", ".", "ParseError", ",", "lxml", ".", "etree", ".", "ParserError", ",", "UnicodeEncodeError", ")", ":", "# likely plain text", "sel", "=", "parsel", ".", "Selector", "(", "html", ")", "return", "sel" ]
Clean parsel.selector.
[ "Clean", "parsel", ".", "selector", "." ]
train
https://github.com/TeamHG-Memex/html-text/blob/871d4dbe9f4f99e5f041110c60458adcaae6fab4/html_text/html_text.py#L164-L177
TeamHG-Memex/html-text
html_text/html_text.py
extract_text
def extract_text(html, guess_punct_space=True, guess_layout=True, newline_tags=NEWLINE_TAGS, double_newline_tags=DOUBLE_NEWLINE_TAGS): """ Convert html to text, cleaning invisible content such as styles. Almost the same as normalize-space xpath, but this also adds spaces between inline elements (like <span>) which are often used as block elements in html markup, and adds appropriate newlines to make output better formatted. html should be a unicode string or an already parsed lxml.html element. ``html_text.etree_to_text`` is a lower-level function which only accepts an already parsed lxml.html Element, and is not doing html cleaning itself. When guess_punct_space is True (default), no extra whitespace is added for punctuation. This has a slight (around 10%) performance overhead and is just a heuristic. When guess_layout is True (default), a newline is added before and after ``newline_tags`` and two newlines are added before and after ``double_newline_tags``. This heuristic makes the extracted text more similar to how it is rendered in the browser. Default newline and double newline tags can be found in `html_text.NEWLINE_TAGS` and `html_text.DOUBLE_NEWLINE_TAGS`. """ if html is None: return '' cleaned = _cleaned_html_tree(html) return etree_to_text( cleaned, guess_punct_space=guess_punct_space, guess_layout=guess_layout, newline_tags=newline_tags, double_newline_tags=double_newline_tags, )
python
def extract_text(html, guess_punct_space=True, guess_layout=True, newline_tags=NEWLINE_TAGS, double_newline_tags=DOUBLE_NEWLINE_TAGS): """ Convert html to text, cleaning invisible content such as styles. Almost the same as normalize-space xpath, but this also adds spaces between inline elements (like <span>) which are often used as block elements in html markup, and adds appropriate newlines to make output better formatted. html should be a unicode string or an already parsed lxml.html element. ``html_text.etree_to_text`` is a lower-level function which only accepts an already parsed lxml.html Element, and is not doing html cleaning itself. When guess_punct_space is True (default), no extra whitespace is added for punctuation. This has a slight (around 10%) performance overhead and is just a heuristic. When guess_layout is True (default), a newline is added before and after ``newline_tags`` and two newlines are added before and after ``double_newline_tags``. This heuristic makes the extracted text more similar to how it is rendered in the browser. Default newline and double newline tags can be found in `html_text.NEWLINE_TAGS` and `html_text.DOUBLE_NEWLINE_TAGS`. """ if html is None: return '' cleaned = _cleaned_html_tree(html) return etree_to_text( cleaned, guess_punct_space=guess_punct_space, guess_layout=guess_layout, newline_tags=newline_tags, double_newline_tags=double_newline_tags, )
[ "def", "extract_text", "(", "html", ",", "guess_punct_space", "=", "True", ",", "guess_layout", "=", "True", ",", "newline_tags", "=", "NEWLINE_TAGS", ",", "double_newline_tags", "=", "DOUBLE_NEWLINE_TAGS", ")", ":", "if", "html", "is", "None", ":", "return", "''", "cleaned", "=", "_cleaned_html_tree", "(", "html", ")", "return", "etree_to_text", "(", "cleaned", ",", "guess_punct_space", "=", "guess_punct_space", ",", "guess_layout", "=", "guess_layout", ",", "newline_tags", "=", "newline_tags", ",", "double_newline_tags", "=", "double_newline_tags", ",", ")" ]
Convert html to text, cleaning invisible content such as styles. Almost the same as normalize-space xpath, but this also adds spaces between inline elements (like <span>) which are often used as block elements in html markup, and adds appropriate newlines to make output better formatted. html should be a unicode string or an already parsed lxml.html element. ``html_text.etree_to_text`` is a lower-level function which only accepts an already parsed lxml.html Element, and is not doing html cleaning itself. When guess_punct_space is True (default), no extra whitespace is added for punctuation. This has a slight (around 10%) performance overhead and is just a heuristic. When guess_layout is True (default), a newline is added before and after ``newline_tags`` and two newlines are added before and after ``double_newline_tags``. This heuristic makes the extracted text more similar to how it is rendered in the browser. Default newline and double newline tags can be found in `html_text.NEWLINE_TAGS` and `html_text.DOUBLE_NEWLINE_TAGS`.
[ "Convert", "html", "to", "text", "cleaning", "invisible", "content", "such", "as", "styles", "." ]
train
https://github.com/TeamHG-Memex/html-text/blob/871d4dbe9f4f99e5f041110c60458adcaae6fab4/html_text/html_text.py#L180-L219
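An end-to-end sketch of the public entry point above; the markup is made up and the rendered output is approximate.

from html_text import extract_text

html = ('<style>p { color: red }</style>'
        '<h1>Heading</h1><p>Some <span>inline</span> text.</p>')
print(extract_text(html))
# Cleaning drops the <style> block; guess_layout separates the blocks, giving roughly:
# Heading
#
# Some inline text.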
Schwanksta/python-arcgis-rest-query
arcgis/arcgis.py
ArcGIS.get_json
def get_json(self, layer, where="1 = 1", fields=[], count_only=False, srid='4326'): """ Gets the JSON file from ArcGIS """ params = { 'where': where, 'outFields': ", ".join(fields), 'returnGeometry': True, 'outSR': srid, 'f': "pjson", 'orderByFields': self.object_id_field, 'returnCountOnly': count_only } if self.token: params['token'] = self.token if self.geom_type: params.update({'geometryType': self.geom_type}) response = requests.get(self._build_query_request(layer), params=params) return response.json()
python
def get_json(self, layer, where="1 = 1", fields=[], count_only=False, srid='4326'): """ Gets the JSON file from ArcGIS """ params = { 'where': where, 'outFields': ", ".join(fields), 'returnGeometry': True, 'outSR': srid, 'f': "pjson", 'orderByFields': self.object_id_field, 'returnCountOnly': count_only } if self.token: params['token'] = self.token if self.geom_type: params.update({'geometryType': self.geom_type}) response = requests.get(self._build_query_request(layer), params=params) return response.json()
[ "def", "get_json", "(", "self", ",", "layer", ",", "where", "=", "\"1 = 1\"", ",", "fields", "=", "[", "]", ",", "count_only", "=", "False", ",", "srid", "=", "'4326'", ")", ":", "params", "=", "{", "'where'", ":", "where", ",", "'outFields'", ":", "\", \"", ".", "join", "(", "fields", ")", ",", "'returnGeometry'", ":", "True", ",", "'outSR'", ":", "srid", ",", "'f'", ":", "\"pjson\"", ",", "'orderByFields'", ":", "self", ".", "object_id_field", ",", "'returnCountOnly'", ":", "count_only", "}", "if", "self", ".", "token", ":", "params", "[", "'token'", "]", "=", "self", ".", "token", "if", "self", ".", "geom_type", ":", "params", ".", "update", "(", "{", "'geometryType'", ":", "self", ".", "geom_type", "}", ")", "response", "=", "requests", ".", "get", "(", "self", ".", "_build_query_request", "(", "layer", ")", ",", "params", "=", "params", ")", "return", "response", ".", "json", "(", ")" ]
Gets the JSON file from ArcGIS
[ "Gets", "the", "JSON", "file", "from", "ArcGIS" ]
train
https://github.com/Schwanksta/python-arcgis-rest-query/blob/020d17f5dfb63d7be4e2e245771453f2ae9410aa/arcgis/arcgis.py#L86-L104
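A hedged sketch of get_json; the service URL is fictional, and the ArcGIS(url) constructor signature is an assumption (it is not shown in these records). The import path follows the record's file path, arcgis/arcgis.py.

from arcgis.arcgis import ArcGIS

service = ArcGIS('http://example.com/arcgis/rest/services/Demo/MapServer')  # hypothetical URL
raw = service.get_json(0, where="STATE_NAME = 'California'", fields=['NAME'])
count = service.get_json(0, count_only=True)  # raw server JSON, e.g. {'count': 1234}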
Schwanksta/python-arcgis-rest-query
arcgis/arcgis.py
ArcGIS.get_descriptor_for_layer
def get_descriptor_for_layer(self, layer): """ Returns the standard JSON descriptor for the layer. There is a lot of usefule information in there. """ if not layer in self._layer_descriptor_cache: params = {'f': 'pjson'} if self.token: params['token'] = self.token response = requests.get(self._build_request(layer), params=params) self._layer_descriptor_cache[layer] = response.json() return self._layer_descriptor_cache[layer]
python
def get_descriptor_for_layer(self, layer): """ Returns the standard JSON descriptor for the layer. There is a lot of usefule information in there. """ if not layer in self._layer_descriptor_cache: params = {'f': 'pjson'} if self.token: params['token'] = self.token response = requests.get(self._build_request(layer), params=params) self._layer_descriptor_cache[layer] = response.json() return self._layer_descriptor_cache[layer]
[ "def", "get_descriptor_for_layer", "(", "self", ",", "layer", ")", ":", "if", "not", "layer", "in", "self", ".", "_layer_descriptor_cache", ":", "params", "=", "{", "'f'", ":", "'pjson'", "}", "if", "self", ".", "token", ":", "params", "[", "'token'", "]", "=", "self", ".", "token", "response", "=", "requests", ".", "get", "(", "self", ".", "_build_request", "(", "layer", ")", ",", "params", "=", "params", ")", "self", ".", "_layer_descriptor_cache", "[", "layer", "]", "=", "response", ".", "json", "(", ")", "return", "self", ".", "_layer_descriptor_cache", "[", "layer", "]" ]
Returns the standard JSON descriptor for the layer. There is a lot of useful information in there.
[ "Returns", "the", "standard", "JSON", "descriptor", "for", "the", "layer", ".", "There", "is", "a", "lot", "of", "usefule", "information", "in", "there", "." ]
train
https://github.com/Schwanksta/python-arcgis-rest-query/blob/020d17f5dfb63d7be4e2e245771453f2ae9410aa/arcgis/arcgis.py#L106-L117
Schwanksta/python-arcgis-rest-query
arcgis/arcgis.py
ArcGIS.enumerate_layer_fields
def enumerate_layer_fields(self, layer): """ Pulls out all of the field names for a layer. """ descriptor = self.get_descriptor_for_layer(layer) return [field['name'] for field in descriptor['fields']]
python
def enumerate_layer_fields(self, layer): """ Pulls out all of the field names for a layer. """ descriptor = self.get_descriptor_for_layer(layer) return [field['name'] for field in descriptor['fields']]
[ "def", "enumerate_layer_fields", "(", "self", ",", "layer", ")", ":", "descriptor", "=", "self", ".", "get_descriptor_for_layer", "(", "layer", ")", "return", "[", "field", "[", "'name'", "]", "for", "field", "in", "descriptor", "[", "'fields'", "]", "]" ]
Pulls out all of the field names for a layer.
[ "Pulls", "out", "all", "of", "the", "field", "names", "for", "a", "layer", "." ]
train
https://github.com/Schwanksta/python-arcgis-rest-query/blob/020d17f5dfb63d7be4e2e245771453f2ae9410aa/arcgis/arcgis.py#L119-L124
Schwanksta/python-arcgis-rest-query
arcgis/arcgis.py
ArcGIS.get
def get(self, layer, where="1 = 1", fields=[], count_only=False, srid='4326'): """ Gets a layer and returns it as honest to God GeoJSON. WHERE 1 = 1 causes us to get everything. We use OBJECTID in the WHERE clause to paginate, so don't use OBJECTID in your WHERE clause unless you're going to query under 1000 objects. """ base_where = where # By default we grab all of the fields. Technically I think # we can just do "*" for all fields, but I found this was buggy in # the KMZ mode. I'd rather be explicit. fields = fields or self.enumerate_layer_fields(layer) jsobj = self.get_json(layer, where, fields, count_only, srid) # Sometimes you just want to know how far there is to go. if count_only: return jsobj.get('count') # If there is no geometry, we default to assuming it's a Table type # data format, and we dump a simple (non-geo) json of all of the data. if not jsobj.get('geometryType', None): return self.getTable(layer, where, fields, jsobj=jsobj) # From what I can tell, the entire layer tends to be of the same type, # so we only have to determine the parsing function once. geom_parser = self._determine_geom_parser(jsobj.get('geometryType')) features = [] # We always want to run once, and then break out as soon as we stop # getting exceededTransferLimit. while True: features += [self.esri_to_geojson(feat, geom_parser) for feat in jsobj.get('features')] if jsobj.get('exceededTransferLimit', False) == False: break # If we've hit the transfer limit we offset by the last OBJECTID # returned and keep moving along. where = "%s > %s" % (self.object_id_field, features[-1]['properties'].get(self.object_id_field)) if base_where != "1 = 1" : # If we have another WHERE filter we needed to tack that back on. where += " AND %s" % base_where jsobj = self.get_json(layer, where, fields, count_only, srid) return { 'type': "FeatureCollection", 'features': features }
python
def get(self, layer, where="1 = 1", fields=[], count_only=False, srid='4326'): """ Gets a layer and returns it as honest to God GeoJSON. WHERE 1 = 1 causes us to get everything. We use OBJECTID in the WHERE clause to paginate, so don't use OBJECTID in your WHERE clause unless you're going to query under 1000 objects. """ base_where = where # By default we grab all of the fields. Technically I think # we can just do "*" for all fields, but I found this was buggy in # the KMZ mode. I'd rather be explicit. fields = fields or self.enumerate_layer_fields(layer) jsobj = self.get_json(layer, where, fields, count_only, srid) # Sometimes you just want to know how far there is to go. if count_only: return jsobj.get('count') # If there is no geometry, we default to assuming it's a Table type # data format, and we dump a simple (non-geo) json of all of the data. if not jsobj.get('geometryType', None): return self.getTable(layer, where, fields, jsobj=jsobj) # From what I can tell, the entire layer tends to be of the same type, # so we only have to determine the parsing function once. geom_parser = self._determine_geom_parser(jsobj.get('geometryType')) features = [] # We always want to run once, and then break out as soon as we stop # getting exceededTransferLimit. while True: features += [self.esri_to_geojson(feat, geom_parser) for feat in jsobj.get('features')] if jsobj.get('exceededTransferLimit', False) == False: break # If we've hit the transfer limit we offset by the last OBJECTID # returned and keep moving along. where = "%s > %s" % (self.object_id_field, features[-1]['properties'].get(self.object_id_field)) if base_where != "1 = 1" : # If we have another WHERE filter we needed to tack that back on. where += " AND %s" % base_where jsobj = self.get_json(layer, where, fields, count_only, srid) return { 'type': "FeatureCollection", 'features': features }
[ "def", "get", "(", "self", ",", "layer", ",", "where", "=", "\"1 = 1\"", ",", "fields", "=", "[", "]", ",", "count_only", "=", "False", ",", "srid", "=", "'4326'", ")", ":", "base_where", "=", "where", "# By default we grab all of the fields. Technically I think", "# we can just do \"*\" for all fields, but I found this was buggy in", "# the KMZ mode. I'd rather be explicit.", "fields", "=", "fields", "or", "self", ".", "enumerate_layer_fields", "(", "layer", ")", "jsobj", "=", "self", ".", "get_json", "(", "layer", ",", "where", ",", "fields", ",", "count_only", ",", "srid", ")", "# Sometimes you just want to know how far there is to go.", "if", "count_only", ":", "return", "jsobj", ".", "get", "(", "'count'", ")", "# If there is no geometry, we default to assuming it's a Table type", "# data format, and we dump a simple (non-geo) json of all of the data.", "if", "not", "jsobj", ".", "get", "(", "'geometryType'", ",", "None", ")", ":", "return", "self", ".", "getTable", "(", "layer", ",", "where", ",", "fields", ",", "jsobj", "=", "jsobj", ")", "# From what I can tell, the entire layer tends to be of the same type,", "# so we only have to determine the parsing function once.", "geom_parser", "=", "self", ".", "_determine_geom_parser", "(", "jsobj", ".", "get", "(", "'geometryType'", ")", ")", "features", "=", "[", "]", "# We always want to run once, and then break out as soon as we stop", "# getting exceededTransferLimit.", "while", "True", ":", "features", "+=", "[", "self", ".", "esri_to_geojson", "(", "feat", ",", "geom_parser", ")", "for", "feat", "in", "jsobj", ".", "get", "(", "'features'", ")", "]", "if", "jsobj", ".", "get", "(", "'exceededTransferLimit'", ",", "False", ")", "==", "False", ":", "break", "# If we've hit the transfer limit we offset by the last OBJECTID", "# returned and keep moving along.", "where", "=", "\"%s > %s\"", "%", "(", "self", ".", "object_id_field", ",", "features", "[", "-", "1", "]", "[", "'properties'", "]", ".", "get", "(", "self", ".", "object_id_field", ")", ")", "if", "base_where", "!=", "\"1 = 1\"", ":", "# If we have another WHERE filter we needed to tack that back on.", "where", "+=", "\" AND %s\"", "%", "base_where", "jsobj", "=", "self", ".", "get_json", "(", "layer", ",", "where", ",", "fields", ",", "count_only", ",", "srid", ")", "return", "{", "'type'", ":", "\"FeatureCollection\"", ",", "'features'", ":", "features", "}" ]
Gets a layer and returns it as honest to God GeoJSON. WHERE 1 = 1 causes us to get everything. We use OBJECTID in the WHERE clause to paginate, so don't use OBJECTID in your WHERE clause unless you're going to query under 1000 objects.
[ "Gets", "a", "layer", "and", "returns", "it", "as", "honest", "to", "God", "GeoJSON", "." ]
train
https://github.com/Schwanksta/python-arcgis-rest-query/blob/020d17f5dfb63d7be4e2e245771453f2ae9410aa/arcgis/arcgis.py#L126-L173
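A usage sketch for .get, reusing the hypothetical service object from the get_json sketch above. Note the docstring's warning: OBJECTID is reserved for the internal pagination performed by the while loop, so keep it out of your own WHERE clause for large layers.

geojson = service.get(0, where='POPULATION > 100000', fields=['NAME', 'POPULATION'])
print(geojson['type'])           # 'FeatureCollection'
print(len(geojson['features']))  # all pages accumulated, 1000 records at a time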
Schwanksta/python-arcgis-rest-query
arcgis/arcgis.py
ArcGIS.getTable
def getTable(self, layer, where="1 = 1", fields=[], jsobj=None): """ Returns JSON for a Table type. You shouldn't use this directly -- it's an automatic falback from .get if there is no geometry """ base_where = where features = [] # We always want to run once, and then break out as soon as we stop # getting exceededTransferLimit. while True: features += [feat.get('attributes') for feat in jsobj.get('features')] # There isn't an exceededTransferLimit? if len(jsobj.get('features')) < 1000: break # If we've hit the transfer limit we offset by the last OBJECTID # returned and keep moving along. where = "%s > %s" % (self.object_id_field, features[-1].get(self.object_id_field)) if base_where != "1 = 1" : # If we have another WHERE filter we needed to tack that back on. where += " AND %s" % base_where jsobj = self.get_json(layer, where, fields) return features
python
def getTable(self, layer, where="1 = 1", fields=[], jsobj=None): """ Returns JSON for a Table type. You shouldn't use this directly -- it's an automatic falback from .get if there is no geometry """ base_where = where features = [] # We always want to run once, and then break out as soon as we stop # getting exceededTransferLimit. while True: features += [feat.get('attributes') for feat in jsobj.get('features')] # There isn't an exceededTransferLimit? if len(jsobj.get('features')) < 1000: break # If we've hit the transfer limit we offset by the last OBJECTID # returned and keep moving along. where = "%s > %s" % (self.object_id_field, features[-1].get(self.object_id_field)) if base_where != "1 = 1" : # If we have another WHERE filter we needed to tack that back on. where += " AND %s" % base_where jsobj = self.get_json(layer, where, fields) return features
[ "def", "getTable", "(", "self", ",", "layer", ",", "where", "=", "\"1 = 1\"", ",", "fields", "=", "[", "]", ",", "jsobj", "=", "None", ")", ":", "base_where", "=", "where", "features", "=", "[", "]", "# We always want to run once, and then break out as soon as we stop", "# getting exceededTransferLimit.", "while", "True", ":", "features", "+=", "[", "feat", ".", "get", "(", "'attributes'", ")", "for", "feat", "in", "jsobj", ".", "get", "(", "'features'", ")", "]", "# There isn't an exceededTransferLimit?", "if", "len", "(", "jsobj", ".", "get", "(", "'features'", ")", ")", "<", "1000", ":", "break", "# If we've hit the transfer limit we offset by the last OBJECTID", "# returned and keep moving along.", "where", "=", "\"%s > %s\"", "%", "(", "self", ".", "object_id_field", ",", "features", "[", "-", "1", "]", ".", "get", "(", "self", ".", "object_id_field", ")", ")", "if", "base_where", "!=", "\"1 = 1\"", ":", "# If we have another WHERE filter we needed to tack that back on.", "where", "+=", "\" AND %s\"", "%", "base_where", "jsobj", "=", "self", ".", "get_json", "(", "layer", ",", "where", ",", "fields", ")", "return", "features" ]
Returns JSON for a Table type. You shouldn't use this directly -- it's an automatic fallback from .get if there is no geometry
[ "Returns", "JSON", "for", "a", "Table", "type", ".", "You", "shouldn", "t", "use", "this", "directly", "--", "it", "s", "an", "automatic", "falback", "from", ".", "get", "if", "there", "is", "no", "geometry" ]
train
https://github.com/Schwanksta/python-arcgis-rest-query/blob/020d17f5dfb63d7be4e2e245771453f2ae9410aa/arcgis/arcgis.py#L175-L196
Schwanksta/python-arcgis-rest-query
arcgis/arcgis.py
ArcGIS.getMultiple
def getMultiple(self, layers, where="1 = 1", fields=[], srid='4326', layer_name_field=None): """ Get a bunch of layers and concatenate them together into one. This is useful if you have a map with layers for, say, every year named stuff_2014, stuff_2013, stuff_2012. Etc. Optionally, you can stuff the source layer name into a field of your choosing. >>> arc.getMultiple([0, 3, 5], layer_name_field='layer_src_name') """ features = [] for layer in layers: get_fields = fields or self.enumerate_layer_fields(layer) this_layer = self.get(layer, where, get_fields, False, srid).get('features') if layer_name_field: descriptor = self.get_descriptor_for_layer(layer) layer_name = descriptor.get('name') for feature in this_layer: feature['properties'][layer_name_field] = layer_name features += this_layer return { 'type': "FeatureCollection", 'features': features }
python
def getMultiple(self, layers, where="1 = 1", fields=[], srid='4326', layer_name_field=None): """ Get a bunch of layers and concatenate them together into one. This is useful if you have a map with layers for, say, every year named stuff_2014, stuff_2013, stuff_2012. Etc. Optionally, you can stuff the source layer name into a field of your choosing. >>> arc.getMultiple([0, 3, 5], layer_name_field='layer_src_name') """ features = [] for layer in layers: get_fields = fields or self.enumerate_layer_fields(layer) this_layer = self.get(layer, where, get_fields, False, srid).get('features') if layer_name_field: descriptor = self.get_descriptor_for_layer(layer) layer_name = descriptor.get('name') for feature in this_layer: feature['properties'][layer_name_field] = layer_name features += this_layer return { 'type': "FeatureCollection", 'features': features }
[ "def", "getMultiple", "(", "self", ",", "layers", ",", "where", "=", "\"1 = 1\"", ",", "fields", "=", "[", "]", ",", "srid", "=", "'4326'", ",", "layer_name_field", "=", "None", ")", ":", "features", "=", "[", "]", "for", "layer", "in", "layers", ":", "get_fields", "=", "fields", "or", "self", ".", "enumerate_layer_fields", "(", "layer", ")", "this_layer", "=", "self", ".", "get", "(", "layer", ",", "where", ",", "get_fields", ",", "False", ",", "srid", ")", ".", "get", "(", "'features'", ")", "if", "layer_name_field", ":", "descriptor", "=", "self", ".", "get_descriptor_for_layer", "(", "layer", ")", "layer_name", "=", "descriptor", ".", "get", "(", "'name'", ")", "for", "feature", "in", "this_layer", ":", "feature", "[", "'properties'", "]", "[", "layer_name_field", "]", "=", "layer_name", "features", "+=", "this_layer", "return", "{", "'type'", ":", "\"FeatureCollection\"", ",", "'features'", ":", "features", "}" ]
Get a bunch of layers and concatenate them together into one. This is useful if you have a map with layers for, say, every year named stuff_2014, stuff_2013, stuff_2012. Etc. Optionally, you can stuff the source layer name into a field of your choosing. >>> arc.getMultiple([0, 3, 5], layer_name_field='layer_src_name')
[ "Get", "a", "bunch", "of", "layers", "and", "concatenate", "them", "together", "into", "one", ".", "This", "is", "useful", "if", "you", "have", "a", "map", "with", "layers", "for", "say", "every", "year", "named", "stuff_2014", "stuff_2013", "stuff_2012", ".", "Etc", "." ]
train
https://github.com/Schwanksta/python-arcgis-rest-query/blob/020d17f5dfb63d7be4e2e245771453f2ae9410aa/arcgis/arcgis.py#L198-L221
common-workflow-language/workflow-service
wes_client/util.py
get_version
def get_version(extension, workflow_file): '''Determines the version of a .py, .wdl, or .cwl file.''' if extension == 'py' and two_seven_compatible(workflow_file): return '2.7' elif extension == 'cwl': return yaml.load(open(workflow_file))['cwlVersion'] else: # Must be a wdl file. # Borrowed from https://github.com/Sage-Bionetworks/synapse-orchestrator/blob/develop/synorchestrator/util.py#L142 try: return [l.lstrip('version') for l in workflow_file.splitlines() if 'version' in l.split(' ')][0] except IndexError: return 'draft-2'
python
def get_version(extension, workflow_file): '''Determines the version of a .py, .wdl, or .cwl file.''' if extension == 'py' and two_seven_compatible(workflow_file): return '2.7' elif extension == 'cwl': return yaml.load(open(workflow_file))['cwlVersion'] else: # Must be a wdl file. # Borrowed from https://github.com/Sage-Bionetworks/synapse-orchestrator/blob/develop/synorchestrator/util.py#L142 try: return [l.lstrip('version') for l in workflow_file.splitlines() if 'version' in l.split(' ')][0] except IndexError: return 'draft-2'
[ "def", "get_version", "(", "extension", ",", "workflow_file", ")", ":", "if", "extension", "==", "'py'", "and", "two_seven_compatible", "(", "workflow_file", ")", ":", "return", "'2.7'", "elif", "extension", "==", "'cwl'", ":", "return", "yaml", ".", "load", "(", "open", "(", "workflow_file", ")", ")", "[", "'cwlVersion'", "]", "else", ":", "# Must be a wdl file.", "# Borrowed from https://github.com/Sage-Bionetworks/synapse-orchestrator/blob/develop/synorchestrator/util.py#L142", "try", ":", "return", "[", "l", ".", "lstrip", "(", "'version'", ")", "for", "l", "in", "workflow_file", ".", "splitlines", "(", ")", "if", "'version'", "in", "l", ".", "split", "(", "' '", ")", "]", "[", "0", "]", "except", "IndexError", ":", "return", "'draft-2'" ]
Determines the version of a .py, .wdl, or .cwl file.
[ "Determines", "the", "version", "of", "a", ".", "py", ".", "wdl", "or", ".", "cwl", "file", "." ]
train
https://github.com/common-workflow-language/workflow-service/blob/e879604b65c55546e4f87be1c9df9903a3e0b896/wes_client/util.py#L27-L38
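An illustration of the three branches above with hypothetical files. Note that, as written, the WDL branch splits whatever string it is handed, so when a file path is passed (as wf_info does) it typically finds no 'version' line and falls back to 'draft-2'.

from wes_client.util import get_version

get_version('py', 'pipeline.py')    # '2.7' when two_seven_compatible() passes
get_version('cwl', 'pipeline.cwl')  # opens the file and returns its cwlVersion, e.g. 'v1.0'
get_version('wdl', 'pipeline.wdl')  # scans the given string for a 'version ...' line, else 'draft-2'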
common-workflow-language/workflow-service
wes_client/util.py
wf_info
def wf_info(workflow_path): """ Returns the version of the file and the file extension. Assumes that the file path is to the file directly ie, ends with a valid file extension.Supports checking local files as well as files at http:// and https:// locations. Files at these remote locations are recreated locally to enable our approach to version checking, then removed after version is extracted. """ supported_formats = ['py', 'wdl', 'cwl'] file_type = workflow_path.lower().split('.')[-1] # Grab the file extension workflow_path = workflow_path if ':' in workflow_path else 'file://' + workflow_path if file_type in supported_formats: if workflow_path.startswith('file://'): version = get_version(file_type, workflow_path[7:]) elif workflow_path.startswith('https://') or workflow_path.startswith('http://'): # If file not local go fetch it. html = urlopen(workflow_path).read() local_loc = os.path.join(os.getcwd(), 'fetchedFromRemote.' + file_type) with open(local_loc, 'w') as f: f.write(html.decode()) version = wf_info('file://' + local_loc)[0] # Don't take the file_type here, found it above. os.remove(local_loc) # TODO: Find a way to avoid recreating file before version determination. else: raise NotImplementedError('Unsupported workflow file location: {}. Must be local or HTTP(S).'.format(workflow_path)) else: raise TypeError('Unsupported workflow type: .{}. Must be {}.'.format(file_type, '.py, .cwl, or .wdl')) return version, file_type.upper()
python
def wf_info(workflow_path): """ Returns the version of the file and the file extension. Assumes that the file path is to the file directly ie, ends with a valid file extension.Supports checking local files as well as files at http:// and https:// locations. Files at these remote locations are recreated locally to enable our approach to version checking, then removed after version is extracted. """ supported_formats = ['py', 'wdl', 'cwl'] file_type = workflow_path.lower().split('.')[-1] # Grab the file extension workflow_path = workflow_path if ':' in workflow_path else 'file://' + workflow_path if file_type in supported_formats: if workflow_path.startswith('file://'): version = get_version(file_type, workflow_path[7:]) elif workflow_path.startswith('https://') or workflow_path.startswith('http://'): # If file not local go fetch it. html = urlopen(workflow_path).read() local_loc = os.path.join(os.getcwd(), 'fetchedFromRemote.' + file_type) with open(local_loc, 'w') as f: f.write(html.decode()) version = wf_info('file://' + local_loc)[0] # Don't take the file_type here, found it above. os.remove(local_loc) # TODO: Find a way to avoid recreating file before version determination. else: raise NotImplementedError('Unsupported workflow file location: {}. Must be local or HTTP(S).'.format(workflow_path)) else: raise TypeError('Unsupported workflow type: .{}. Must be {}.'.format(file_type, '.py, .cwl, or .wdl')) return version, file_type.upper()
[ "def", "wf_info", "(", "workflow_path", ")", ":", "supported_formats", "=", "[", "'py'", ",", "'wdl'", ",", "'cwl'", "]", "file_type", "=", "workflow_path", ".", "lower", "(", ")", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "# Grab the file extension", "workflow_path", "=", "workflow_path", "if", "':'", "in", "workflow_path", "else", "'file://'", "+", "workflow_path", "if", "file_type", "in", "supported_formats", ":", "if", "workflow_path", ".", "startswith", "(", "'file://'", ")", ":", "version", "=", "get_version", "(", "file_type", ",", "workflow_path", "[", "7", ":", "]", ")", "elif", "workflow_path", ".", "startswith", "(", "'https://'", ")", "or", "workflow_path", ".", "startswith", "(", "'http://'", ")", ":", "# If file not local go fetch it.", "html", "=", "urlopen", "(", "workflow_path", ")", ".", "read", "(", ")", "local_loc", "=", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "'fetchedFromRemote.'", "+", "file_type", ")", "with", "open", "(", "local_loc", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "html", ".", "decode", "(", ")", ")", "version", "=", "wf_info", "(", "'file://'", "+", "local_loc", ")", "[", "0", "]", "# Don't take the file_type here, found it above.", "os", ".", "remove", "(", "local_loc", ")", "# TODO: Find a way to avoid recreating file before version determination.", "else", ":", "raise", "NotImplementedError", "(", "'Unsupported workflow file location: {}. Must be local or HTTP(S).'", ".", "format", "(", "workflow_path", ")", ")", "else", ":", "raise", "TypeError", "(", "'Unsupported workflow type: .{}. Must be {}.'", ".", "format", "(", "file_type", ",", "'.py, .cwl, or .wdl'", ")", ")", "return", "version", ",", "file_type", ".", "upper", "(", ")" ]
Returns the version of the file and the file extension. Assumes that the file path is to the file directly, i.e., ends with a valid file extension. Supports checking local files as well as files at http:// and https:// locations. Files at these remote locations are recreated locally to enable our approach to version checking, then removed after the version is extracted.
[ "Returns", "the", "version", "of", "the", "file", "and", "the", "file", "extension", "." ]
train
https://github.com/common-workflow-language/workflow-service/blob/e879604b65c55546e4f87be1c9df9903a3e0b896/wes_client/util.py#L41-L69
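A hedged sketch of wf_info; all paths are hypothetical.

from wes_client.util import wf_info

wf_info('/workflows/align.cwl')          # -> ('v1.0', 'CWL'), version read from the file
wf_info('https://example.com/tool.wdl')  # fetched to a local copy first, then inspected
wf_info('notes.txt')                     # raises TypeError: unsupported extension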
common-workflow-language/workflow-service
wes_client/util.py
modify_jsonyaml_paths
def modify_jsonyaml_paths(jsonyaml_file): """ Changes relative paths in a json/yaml file to be relative to where the json/yaml file is located. :param jsonyaml_file: Path to a json/yaml file. """ loader = schema_salad.ref_resolver.Loader({ "location": {"@type": "@id"}, "path": {"@type": "@id"} }) input_dict, _ = loader.resolve_ref(jsonyaml_file, checklinks=False) basedir = os.path.dirname(jsonyaml_file) def fixpaths(d): """Make sure all paths have a URI scheme.""" if isinstance(d, dict): if "path" in d: if ":" not in d["path"]: local_path = os.path.normpath(os.path.join(os.getcwd(), basedir, d["path"])) d["location"] = pathname2url(local_path) else: d["location"] = d["path"] del d["path"] visit(input_dict, fixpaths) return json.dumps(input_dict)
python
def modify_jsonyaml_paths(jsonyaml_file): """ Changes relative paths in a json/yaml file to be relative to where the json/yaml file is located. :param jsonyaml_file: Path to a json/yaml file. """ loader = schema_salad.ref_resolver.Loader({ "location": {"@type": "@id"}, "path": {"@type": "@id"} }) input_dict, _ = loader.resolve_ref(jsonyaml_file, checklinks=False) basedir = os.path.dirname(jsonyaml_file) def fixpaths(d): """Make sure all paths have a URI scheme.""" if isinstance(d, dict): if "path" in d: if ":" not in d["path"]: local_path = os.path.normpath(os.path.join(os.getcwd(), basedir, d["path"])) d["location"] = pathname2url(local_path) else: d["location"] = d["path"] del d["path"] visit(input_dict, fixpaths) return json.dumps(input_dict)
[ "def", "modify_jsonyaml_paths", "(", "jsonyaml_file", ")", ":", "loader", "=", "schema_salad", ".", "ref_resolver", ".", "Loader", "(", "{", "\"location\"", ":", "{", "\"@type\"", ":", "\"@id\"", "}", ",", "\"path\"", ":", "{", "\"@type\"", ":", "\"@id\"", "}", "}", ")", "input_dict", ",", "_", "=", "loader", ".", "resolve_ref", "(", "jsonyaml_file", ",", "checklinks", "=", "False", ")", "basedir", "=", "os", ".", "path", ".", "dirname", "(", "jsonyaml_file", ")", "def", "fixpaths", "(", "d", ")", ":", "\"\"\"Make sure all paths have a URI scheme.\"\"\"", "if", "isinstance", "(", "d", ",", "dict", ")", ":", "if", "\"path\"", "in", "d", ":", "if", "\":\"", "not", "in", "d", "[", "\"path\"", "]", ":", "local_path", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "basedir", ",", "d", "[", "\"path\"", "]", ")", ")", "d", "[", "\"location\"", "]", "=", "pathname2url", "(", "local_path", ")", "else", ":", "d", "[", "\"location\"", "]", "=", "d", "[", "\"path\"", "]", "del", "d", "[", "\"path\"", "]", "visit", "(", "input_dict", ",", "fixpaths", ")", "return", "json", ".", "dumps", "(", "input_dict", ")" ]
Changes relative paths in a json/yaml file to be relative to where the json/yaml file is located. :param jsonyaml_file: Path to a json/yaml file.
[ "Changes", "relative", "paths", "in", "a", "json", "/", "yaml", "file", "to", "be", "relative", "to", "where", "the", "json", "/", "yaml", "file", "is", "located", "." ]
train
https://github.com/common-workflow-language/workflow-service/blob/e879604b65c55546e4f87be1c9df9903a3e0b896/wes_client/util.py#L72-L98
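A sketch of the path rewriting, assuming a local /data/job.json exists; the filenames are made up and the result is described roughly.

from wes_client.util import modify_jsonyaml_paths

# Given /data/job.json containing {"reads": {"class": "File", "path": "input.fq"}},
# the relative path is resolved against the JSON file's directory:
params = modify_jsonyaml_paths('/data/job.json')
# params is a JSON string in which the "path" key has been replaced by
# "location": pathname2url('/data/input.fq')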
common-workflow-language/workflow-service
wes_client/util.py
build_wes_request
def build_wes_request(workflow_file, json_path, attachments=None): """ :param str workflow_file: Path to cwl/wdl file. Can be http/https/file. :param json_path: Path to accompanying json file. :param attachments: Any other files needing to be uploaded to the server. :return: A list of tuples formatted to be sent in a post to the wes-server (Swagger API). """ workflow_file = "file://" + workflow_file if ":" not in workflow_file else workflow_file wfbase = None if json_path.startswith("file://"): wfbase = os.path.dirname(json_path[7:]) json_path = json_path[7:] with open(json_path) as f: wf_params = json.dumps(json.load(f)) elif json_path.startswith("http"): wf_params = modify_jsonyaml_paths(json_path) else: wf_params = json_path wf_version, wf_type = wf_info(workflow_file) parts = [("workflow_params", wf_params), ("workflow_type", wf_type), ("workflow_type_version", wf_version)] if workflow_file.startswith("file://"): if wfbase is None: wfbase = os.path.dirname(workflow_file[7:]) parts.append(("workflow_attachment", (os.path.basename(workflow_file[7:]), open(workflow_file[7:], "rb")))) parts.append(("workflow_url", os.path.basename(workflow_file[7:]))) else: parts.append(("workflow_url", workflow_file)) if wfbase is None: wfbase = os.getcwd() if attachments: for attachment in attachments: if attachment.startswith("file://"): attachment = attachment[7:] attach_f = open(attachment, "rb") relpath = os.path.relpath(attachment, wfbase) elif attachment.startswith("http"): attach_f = urlopen(attachment) relpath = os.path.basename(attach_f) parts.append(("workflow_attachment", (relpath, attach_f))) return parts
python
def build_wes_request(workflow_file, json_path, attachments=None): """ :param str workflow_file: Path to cwl/wdl file. Can be http/https/file. :param json_path: Path to accompanying json file. :param attachments: Any other files needing to be uploaded to the server. :return: A list of tuples formatted to be sent in a post to the wes-server (Swagger API). """ workflow_file = "file://" + workflow_file if ":" not in workflow_file else workflow_file wfbase = None if json_path.startswith("file://"): wfbase = os.path.dirname(json_path[7:]) json_path = json_path[7:] with open(json_path) as f: wf_params = json.dumps(json.load(f)) elif json_path.startswith("http"): wf_params = modify_jsonyaml_paths(json_path) else: wf_params = json_path wf_version, wf_type = wf_info(workflow_file) parts = [("workflow_params", wf_params), ("workflow_type", wf_type), ("workflow_type_version", wf_version)] if workflow_file.startswith("file://"): if wfbase is None: wfbase = os.path.dirname(workflow_file[7:]) parts.append(("workflow_attachment", (os.path.basename(workflow_file[7:]), open(workflow_file[7:], "rb")))) parts.append(("workflow_url", os.path.basename(workflow_file[7:]))) else: parts.append(("workflow_url", workflow_file)) if wfbase is None: wfbase = os.getcwd() if attachments: for attachment in attachments: if attachment.startswith("file://"): attachment = attachment[7:] attach_f = open(attachment, "rb") relpath = os.path.relpath(attachment, wfbase) elif attachment.startswith("http"): attach_f = urlopen(attachment) relpath = os.path.basename(attach_f) parts.append(("workflow_attachment", (relpath, attach_f))) return parts
[ "def", "build_wes_request", "(", "workflow_file", ",", "json_path", ",", "attachments", "=", "None", ")", ":", "workflow_file", "=", "\"file://\"", "+", "workflow_file", "if", "\":\"", "not", "in", "workflow_file", "else", "workflow_file", "wfbase", "=", "None", "if", "json_path", ".", "startswith", "(", "\"file://\"", ")", ":", "wfbase", "=", "os", ".", "path", ".", "dirname", "(", "json_path", "[", "7", ":", "]", ")", "json_path", "=", "json_path", "[", "7", ":", "]", "with", "open", "(", "json_path", ")", "as", "f", ":", "wf_params", "=", "json", ".", "dumps", "(", "json", ".", "load", "(", "f", ")", ")", "elif", "json_path", ".", "startswith", "(", "\"http\"", ")", ":", "wf_params", "=", "modify_jsonyaml_paths", "(", "json_path", ")", "else", ":", "wf_params", "=", "json_path", "wf_version", ",", "wf_type", "=", "wf_info", "(", "workflow_file", ")", "parts", "=", "[", "(", "\"workflow_params\"", ",", "wf_params", ")", ",", "(", "\"workflow_type\"", ",", "wf_type", ")", ",", "(", "\"workflow_type_version\"", ",", "wf_version", ")", "]", "if", "workflow_file", ".", "startswith", "(", "\"file://\"", ")", ":", "if", "wfbase", "is", "None", ":", "wfbase", "=", "os", ".", "path", ".", "dirname", "(", "workflow_file", "[", "7", ":", "]", ")", "parts", ".", "append", "(", "(", "\"workflow_attachment\"", ",", "(", "os", ".", "path", ".", "basename", "(", "workflow_file", "[", "7", ":", "]", ")", ",", "open", "(", "workflow_file", "[", "7", ":", "]", ",", "\"rb\"", ")", ")", ")", ")", "parts", ".", "append", "(", "(", "\"workflow_url\"", ",", "os", ".", "path", ".", "basename", "(", "workflow_file", "[", "7", ":", "]", ")", ")", ")", "else", ":", "parts", ".", "append", "(", "(", "\"workflow_url\"", ",", "workflow_file", ")", ")", "if", "wfbase", "is", "None", ":", "wfbase", "=", "os", ".", "getcwd", "(", ")", "if", "attachments", ":", "for", "attachment", "in", "attachments", ":", "if", "attachment", ".", "startswith", "(", "\"file://\"", ")", ":", "attachment", "=", "attachment", "[", "7", ":", "]", "attach_f", "=", "open", "(", "attachment", ",", "\"rb\"", ")", "relpath", "=", "os", ".", "path", ".", "relpath", "(", "attachment", ",", "wfbase", ")", "elif", "attachment", ".", "startswith", "(", "\"http\"", ")", ":", "attach_f", "=", "urlopen", "(", "attachment", ")", "relpath", "=", "os", ".", "path", ".", "basename", "(", "attach_f", ")", "parts", ".", "append", "(", "(", "\"workflow_attachment\"", ",", "(", "relpath", ",", "attach_f", ")", ")", ")", "return", "parts" ]
:param str workflow_file: Path to cwl/wdl file. Can be http/https/file. :param json_path: Path to accompanying json file. :param attachments: Any other files needing to be uploaded to the server. :return: A list of tuples formatted to be sent in a post to the wes-server (Swagger API).
[ ":", "param", "str", "workflow_file", ":", "Path", "to", "cwl", "/", "wdl", "file", ".", "Can", "be", "http", "/", "https", "/", "file", ".", ":", "param", "json_path", ":", "Path", "to", "accompanying", "json", "file", ".", ":", "param", "attachments", ":", "Any", "other", "files", "needing", "to", "be", "uploaded", "to", "the", "server", "." ]
train
https://github.com/common-workflow-language/workflow-service/blob/e879604b65c55546e4f87be1c9df9903a3e0b896/wes_client/util.py#L101-L148
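A sketch of the multipart body built for a local workflow. All paths are hypothetical, and the attachments are given file:// prefixes because the loop above only handles the file:// and http(s) schemes.

from wes_client.util import build_wes_request

parts = build_wes_request(
    '/wf/main.cwl',                          # gains a file:// prefix internally
    'file:///wf/job.json',                   # read and re-serialized as the params string
    attachments=['file:///wf/data/reads.fq'])
# Yields tuples ready for requests.post(..., files=parts):
# ('workflow_params', '{...}'), ('workflow_type', 'CWL'),
# ('workflow_type_version', 'v1.0'),
# ('workflow_attachment', ('main.cwl', <file handle>)),
# ('workflow_url', 'main.cwl'),
# ('workflow_attachment', ('data/reads.fq', <file handle>))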
common-workflow-language/workflow-service
wes_client/util.py
WESClient.get_service_info
def get_service_info(self): """ Get information about Workflow Execution Service. May include information related (but not limited to) the workflow descriptor formats, versions supported, the WES API versions supported, and information about general the service availability. :param str auth: String to send in the auth header. :param proto: Schema where the server resides (http, https) :param host: Port where the post request will be sent and the wes server listens at (default 8080) :return: The body of the get result as a dictionary. """ postresult = requests.get("%s://%s/ga4gh/wes/v1/service-info" % (self.proto, self.host), headers=self.auth) return wes_reponse(postresult)
python
def get_service_info(self): """ Get information about Workflow Execution Service. May include information related (but not limited to) the workflow descriptor formats, versions supported, the WES API versions supported, and information about general the service availability. :param str auth: String to send in the auth header. :param proto: Schema where the server resides (http, https) :param host: Port where the post request will be sent and the wes server listens at (default 8080) :return: The body of the get result as a dictionary. """ postresult = requests.get("%s://%s/ga4gh/wes/v1/service-info" % (self.proto, self.host), headers=self.auth) return wes_reponse(postresult)
[ "def", "get_service_info", "(", "self", ")", ":", "postresult", "=", "requests", ".", "get", "(", "\"%s://%s/ga4gh/wes/v1/service-info\"", "%", "(", "self", ".", "proto", ",", "self", ".", "host", ")", ",", "headers", "=", "self", ".", "auth", ")", "return", "wes_reponse", "(", "postresult", ")" ]
Get information about Workflow Execution Service. May include information related to (but not limited to) the workflow descriptor formats, versions supported, the WES API versions supported, and information about general service availability. :param str auth: String to send in the auth header. :param proto: Schema where the server resides (http, https) :param host: Port where the post request will be sent and the wes server listens at (default 8080) :return: The body of the get result as a dictionary.
[ "Get", "information", "about", "Workflow", "Execution", "Service", ".", "May", "include", "information", "related", "(", "but", "not", "limited", "to", ")", "the", "workflow", "descriptor", "formats", "versions", "supported", "the", "WES", "API", "versions", "supported", "and", "information", "about", "general", "the", "service", "availability", "." ]
train
https://github.com/common-workflow-language/workflow-service/blob/e879604b65c55546e4f87be1c9df9903a3e0b896/wes_client/util.py#L180-L195
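A hedged sketch of the client. The WESClient constructor is not shown in these records, so it is assumed here to accept a dict supplying the auth/proto/host attributes the method uses; the host is made up.

from wes_client.util import WESClient

client = WESClient({'auth': {}, 'proto': 'http', 'host': 'localhost:8080'})
info = client.get_service_info()
print(info['workflow_type_versions'])  # field name per the GA4GH WES spec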
common-workflow-language/workflow-service
wes_client/util.py
WESClient.run
def run(self, wf, jsonyaml, attachments): """ Composes and sends a post request that signals the wes server to run a workflow. :param str workflow_file: A local/http/https path to a cwl/wdl/python workflow file. :param str jsonyaml: A local path to a json or yaml file. :param list attachments: A list of local paths to files that will be uploaded to the server. :param str auth: String to send in the auth header. :param proto: Schema where the server resides (http, https) :param host: Port where the post request will be sent and the wes server listens at (default 8080) :return: The body of the post result as a dictionary. """ attachments = list(expand_globs(attachments)) parts = build_wes_request(wf, jsonyaml, attachments) postresult = requests.post("%s://%s/ga4gh/wes/v1/runs" % (self.proto, self.host), files=parts, headers=self.auth) return wes_reponse(postresult)
python
def run(self, wf, jsonyaml, attachments): """ Composes and sends a post request that signals the wes server to run a workflow. :param str workflow_file: A local/http/https path to a cwl/wdl/python workflow file. :param str jsonyaml: A local path to a json or yaml file. :param list attachments: A list of local paths to files that will be uploaded to the server. :param str auth: String to send in the auth header. :param proto: Schema where the server resides (http, https) :param host: Port where the post request will be sent and the wes server listens at (default 8080) :return: The body of the post result as a dictionary. """ attachments = list(expand_globs(attachments)) parts = build_wes_request(wf, jsonyaml, attachments) postresult = requests.post("%s://%s/ga4gh/wes/v1/runs" % (self.proto, self.host), files=parts, headers=self.auth) return wes_reponse(postresult)
[ "def", "run", "(", "self", ",", "wf", ",", "jsonyaml", ",", "attachments", ")", ":", "attachments", "=", "list", "(", "expand_globs", "(", "attachments", ")", ")", "parts", "=", "build_wes_request", "(", "wf", ",", "jsonyaml", ",", "attachments", ")", "postresult", "=", "requests", ".", "post", "(", "\"%s://%s/ga4gh/wes/v1/runs\"", "%", "(", "self", ".", "proto", ",", "self", ".", "host", ")", ",", "files", "=", "parts", ",", "headers", "=", "self", ".", "auth", ")", "return", "wes_reponse", "(", "postresult", ")" ]
Composes and sends a post request that signals the wes server to run a workflow. :param str workflow_file: A local/http/https path to a cwl/wdl/python workflow file. :param str jsonyaml: A local path to a json or yaml file. :param list attachments: A list of local paths to files that will be uploaded to the server. :param str auth: String to send in the auth header. :param proto: Schema where the server resides (http, https) :param host: Port where the post request will be sent and the wes server listens at (default 8080) :return: The body of the post result as a dictionary.
[ "Composes", "and", "sends", "a", "post", "request", "that", "signals", "the", "wes", "server", "to", "run", "a", "workflow", "." ]
train
https://github.com/common-workflow-language/workflow-service/blob/e879604b65c55546e4f87be1c9df9903a3e0b896/wes_client/util.py#L213-L231
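Submitting a workflow, reusing the hypothetical client from the previous sketch; the paths are made up.

resp = client.run('/wf/main.cwl', 'file:///wf/job.json', ['file:///wf/data/reads.fq'])
run_id = resp['run_id']  # response field per the GA4GH WES spec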
common-workflow-language/workflow-service
wes_client/util.py
WESClient.cancel
def cancel(self, run_id): """ Cancel a running workflow. :param run_id: String (typically a uuid) identifying the run. :param str auth: String to send in the auth header. :param proto: Schema where the server resides (http, https) :param host: Port where the post request will be sent and the wes server listens at (default 8080) :return: The body of the delete result as a dictionary. """ postresult = requests.post("%s://%s/ga4gh/wes/v1/runs/%s/cancel" % (self.proto, self.host, run_id), headers=self.auth) return wes_reponse(postresult)
python
def cancel(self, run_id): """ Cancel a running workflow. :param run_id: String (typically a uuid) identifying the run. :param str auth: String to send in the auth header. :param proto: Schema where the server resides (http, https) :param host: Port where the post request will be sent and the wes server listens at (default 8080) :return: The body of the delete result as a dictionary. """ postresult = requests.post("%s://%s/ga4gh/wes/v1/runs/%s/cancel" % (self.proto, self.host, run_id), headers=self.auth) return wes_reponse(postresult)
[ "def", "cancel", "(", "self", ",", "run_id", ")", ":", "postresult", "=", "requests", ".", "post", "(", "\"%s://%s/ga4gh/wes/v1/runs/%s/cancel\"", "%", "(", "self", ".", "proto", ",", "self", ".", "host", ",", "run_id", ")", ",", "headers", "=", "self", ".", "auth", ")", "return", "wes_reponse", "(", "postresult", ")" ]
Cancel a running workflow. :param run_id: String (typically a uuid) identifying the run. :param str auth: String to send in the auth header. :param proto: Schema where the server resides (http, https) :param host: Port where the post request will be sent and the wes server listens at (default 8080) :return: The body of the delete result as a dictionary.
[ "Cancel", "a", "running", "workflow", "." ]
train
https://github.com/common-workflow-language/workflow-service/blob/e879604b65c55546e4f87be1c9df9903a3e0b896/wes_client/util.py#L233-L245
common-workflow-language/workflow-service
wes_client/util.py
WESClient.get_run_log
def get_run_log(self, run_id): """ Get detailed info about a running workflow. :param run_id: String (typically a uuid) identifying the run. :param str auth: String to send in the auth header. :param proto: Schema where the server resides (http, https) :param host: Port where the post request will be sent and the wes server listens at (default 8080) :return: The body of the get result as a dictionary. """ postresult = requests.get("%s://%s/ga4gh/wes/v1/runs/%s" % (self.proto, self.host, run_id), headers=self.auth) return wes_reponse(postresult)
python
def get_run_log(self, run_id): """ Get detailed info about a running workflow. :param run_id: String (typically a uuid) identifying the run. :param str auth: String to send in the auth header. :param proto: Schema where the server resides (http, https) :param host: Port where the post request will be sent and the wes server listens at (default 8080) :return: The body of the get result as a dictionary. """ postresult = requests.get("%s://%s/ga4gh/wes/v1/runs/%s" % (self.proto, self.host, run_id), headers=self.auth) return wes_reponse(postresult)
[ "def", "get_run_log", "(", "self", ",", "run_id", ")", ":", "postresult", "=", "requests", ".", "get", "(", "\"%s://%s/ga4gh/wes/v1/runs/%s\"", "%", "(", "self", ".", "proto", ",", "self", ".", "host", ",", "run_id", ")", ",", "headers", "=", "self", ".", "auth", ")", "return", "wes_reponse", "(", "postresult", ")" ]
Get detailed info about a running workflow. :param run_id: String (typically a uuid) identifying the run. :param str auth: String to send in the auth header. :param proto: Schema where the server resides (http, https) :param host: Port where the post request will be sent and the wes server listens at (default 8080) :return: The body of the get result as a dictionary.
[ "Get", "detailed", "info", "about", "a", "running", "workflow", "." ]
train
https://github.com/common-workflow-language/workflow-service/blob/e879604b65c55546e4f87be1c9df9903a3e0b896/wes_client/util.py#L247-L259
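A polling sketch combining get_run_log with the run_id from the run sketch above; the state names and the 'outputs' field follow the GA4GH WES spec.

import time

while True:
    state = client.get_run_log(run_id)['state']
    if state in ('COMPLETE', 'EXECUTOR_ERROR', 'SYSTEM_ERROR', 'CANCELED'):
        break
    time.sleep(5)
print(client.get_run_log(run_id)['outputs'])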