Dataset columns (name, dtype, observed lengths/classes):

repo               stringlengths   7-55
path               stringlengths   4-127
func_name          stringlengths   1-88
original_string    stringlengths   75-19.8k
language           stringclasses   1 value
code               stringlengths   75-19.8k
code_tokens        sequence
docstring          stringlengths   3-17.3k
docstring_tokens   sequence
sha                stringlengths   40-40
url                stringlengths   87-242
partition          stringclasses   1 value
totalgood/pugnlp
src/pugnlp/util.py
sort_strings
python
def sort_strings(strings, sort_order=None, reverse=False, case_sensitive=False, sort_order_first=True):
    """Sort a list of strings according to the provided sorted list of string prefixes

    TODO:
        - Provide an option to use `.startswith()` rather than a fixed prefix length (will be much slower)

    Arguments:
        sort_order_first (bool): Whether strings in sort_order should always precede "unknown" strings
        sort_order (sequence of str): Desired ordering as a list of prefixes to the strings
            If sort_order strings have varying length, the max length will determine the prefix length compared
        reverse (bool): Whether to reverse the sort order. Passed through to `sorted(strings, reverse=reverse)`
        case_sensitive (bool): Whether to sort in lexicographic rather than alphabetic order
            and whether the prefixes in sort_order are checked in a case-sensitive way

    Examples:
        >>> sort_strings(['morn32', 'morning', 'unknown', 'date', 'dow', 'doy', 'moy'],
        ...              ('dat', 'dow', 'moy', 'dom', 'doy', 'mor'))
        ['date', 'dow', 'moy', 'doy', 'morn32', 'morning', 'unknown']
        >>> sort_strings(['morn32', 'morning', 'unknown', 'less unknown', 'lucy', 'date', 'dow', 'doy', 'moy'],
        ...              ('dat', 'dow', 'moy', 'dom', 'doy', 'mor'), reverse=True)
        ['unknown', 'lucy', 'less unknown', 'morning', 'morn32', 'doy', 'moy', 'dow', 'date']

        Strings whose prefixes don't exist in the `sort_order` sequence can be interleaved into the
        sorted list in lexical order by setting `sort_order_first=False`

        >>> sort_strings(['morn32', 'morning', 'unknown', 'lucy', 'less unknown', 'date', 'dow', 'doy', 'moy'],
        ...              ('dat', 'dow', 'moy', 'dom', 'moy', 'mor'),
        ...              sort_order_first=False)  # doctest: +NORMALIZE_WHITESPACE
        ['date', 'dow', 'doy', 'less unknown', 'lucy', 'moy', 'morn32', 'morning', 'unknown']
    """
    sort_order = tuple(sort_order or ())  # guard against the default sort_order=None
    if not case_sensitive:
        sort_order = tuple(s.lower() for s in sort_order)
        strings = tuple(s.lower() for s in strings)
    prefix_len = max(len(s) for s in sort_order) if sort_order else 0

    def compare(a, b, prefix_len=prefix_len):
        if prefix_len:
            if a[:prefix_len] in sort_order:
                if b[:prefix_len] in sort_order:
                    comparison = sort_order.index(a[:prefix_len]) - sort_order.index(b[:prefix_len])
                    comparison = int(comparison / abs(comparison or 1))
                    if comparison:
                        return comparison * (-2 * reverse + 1)
                elif sort_order_first:
                    return -1 * (-2 * reverse + 1)
            # b may be in sort_order list, so it should be first
            elif sort_order_first and b[:prefix_len] in sort_order:
                return -2 * reverse + 1
        return (-1 * (a < b) + 1 * (a > b)) * (-2 * reverse + 1)

    return sorted(strings, key=functools.cmp_to_key(compare))
[ "def", "sort_strings", "(", "strings", ",", "sort_order", "=", "None", ",", "reverse", "=", "False", ",", "case_sensitive", "=", "False", ",", "sort_order_first", "=", "True", ")", ":", "if", "not", "case_sensitive", ":", "sort_order", "=", "tuple", "(", "s", ".", "lower", "(", ")", "for", "s", "in", "sort_order", ")", "strings", "=", "tuple", "(", "s", ".", "lower", "(", ")", "for", "s", "in", "strings", ")", "prefix_len", "=", "max", "(", "len", "(", "s", ")", "for", "s", "in", "sort_order", ")", "def", "compare", "(", "a", ",", "b", ",", "prefix_len", "=", "prefix_len", ")", ":", "if", "prefix_len", ":", "if", "a", "[", ":", "prefix_len", "]", "in", "sort_order", ":", "if", "b", "[", ":", "prefix_len", "]", "in", "sort_order", ":", "comparison", "=", "sort_order", ".", "index", "(", "a", "[", ":", "prefix_len", "]", ")", "-", "sort_order", ".", "index", "(", "b", "[", ":", "prefix_len", "]", ")", "comparison", "=", "int", "(", "comparison", "/", "abs", "(", "comparison", "or", "1", ")", ")", "if", "comparison", ":", "return", "comparison", "*", "(", "-", "2", "*", "reverse", "+", "1", ")", "elif", "sort_order_first", ":", "return", "-", "1", "*", "(", "-", "2", "*", "reverse", "+", "1", ")", "# b may be in sort_order list, so it should be first", "elif", "sort_order_first", "and", "b", "[", ":", "prefix_len", "]", "in", "sort_order", ":", "return", "-", "2", "*", "reverse", "+", "1", "return", "(", "-", "1", "*", "(", "a", "<", "b", ")", "+", "1", "*", "(", "a", ">", "b", ")", ")", "*", "(", "-", "2", "*", "reverse", "+", "1", ")", "return", "sorted", "(", "strings", ",", "key", "=", "functools", ".", "cmp_to_key", "(", "compare", ")", ")" ]
Sort a list of strings according to the provided sorted list of string prefixes TODO: - Provide an option to use `.startswith()` rather than a fixed prefix length (will be much slower) Arguments: sort_order_first (bool): Whether strings in sort_order should always preceed "unknown" strings sort_order (sequence of str): Desired ordering as a list of prefixes to the strings If sort_order strings have varying length, the max length will determine the prefix length compared reverse (bool): whether to reverse the sort orded. Passed through to `sorted(strings, reverse=reverse)` case_senstive (bool): Whether to sort in lexographic rather than alphabetic order and whether the prefixes in sort_order are checked in a case-sensitive way Examples: >>> sort_strings(['morn32', 'morning', 'unknown', 'date', 'dow', 'doy', 'moy'], ... ('dat', 'dow', 'moy', 'dom', 'doy', 'mor')) ['date', 'dow', 'moy', 'doy', 'morn32', 'morning', 'unknown'] >>> sort_strings(['morn32', 'morning', 'unknown', 'less unknown', 'lucy', 'date', 'dow', 'doy', 'moy'], ... ('dat', 'dow', 'moy', 'dom', 'doy', 'mor'), reverse=True) ['unknown', 'lucy', 'less unknown', 'morning', 'morn32', 'doy', 'moy', 'dow', 'date'] Strings whose prefixes don't exist in `sort_order` sequence can be interleaved into the sorted list in lexical order by setting `sort_order_first=False` >>> sort_strings(['morn32', 'morning', 'unknown', 'lucy', 'less unknown', 'date', 'dow', 'doy', 'moy'], ... ('dat', 'dow', 'moy', 'dom', 'moy', 'mor'), ... sort_order_first=False) # doctest: +NORMALIZE_WHITESPACE ['date', 'dow', 'doy', 'less unknown', 'lucy', 'moy', 'morn32', 'morning', 'unknown']
[ "Sort", "a", "list", "of", "strings", "according", "to", "the", "provided", "sorted", "list", "of", "string", "prefixes" ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L189-L238
train
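The sorting trick above hinges on functools.cmp_to_key, which adapts an old-style two-argument comparator into a key function for sorted(). A minimal standalone sketch of that pattern (illustrative comparator and data, not from pugnlp):

import functools

def compare_by_len_then_alpha(a, b):
    # Old-style comparator: negative puts a first, positive puts b first, 0 is a tie
    if len(a) != len(b):
        return len(a) - len(b)
    return (a > b) - (a < b)

print(sorted(['pear', 'fig', 'apple'], key=functools.cmp_to_key(compare_by_len_then_alpha)))
# ['fig', 'pear', 'apple']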
totalgood/pugnlp
src/pugnlp/util.py
clean_field_dict
python
def clean_field_dict(field_dict, cleaner=str.strip, time_zone=None):
    r"""Normalize field values by stripping whitespace from strings, localizing datetimes to a timezone, etc

    >>> (sorted(clean_field_dict({'_state': object(), 'x': 1, 'y': "\t Wash Me! \n"}).items()) ==
    ...  [('x', 1), ('y', 'Wash Me!')])
    True
    """
    d = {}
    # Use the module default timezone when none is given, so `tz` is always bound
    tz = DEFAULT_TZ if time_zone is None else time_zone
    for k, v in viewitems(field_dict):
        if k == '_state':
            continue
        if isinstance(v, basestring):
            d[k] = cleaner(str(v))
        elif isinstance(v, (datetime.datetime, datetime.date)):
            d[k] = tz.localize(v)
        else:
            d[k] = v
    return d
[ "def", "clean_field_dict", "(", "field_dict", ",", "cleaner", "=", "str", ".", "strip", ",", "time_zone", "=", "None", ")", ":", "d", "=", "{", "}", "if", "time_zone", "is", "None", ":", "tz", "=", "DEFAULT_TZ", "for", "k", ",", "v", "in", "viewitems", "(", "field_dict", ")", ":", "if", "k", "==", "'_state'", ":", "continue", "if", "isinstance", "(", "v", ",", "basestring", ")", ":", "d", "[", "k", "]", "=", "cleaner", "(", "str", "(", "v", ")", ")", "elif", "isinstance", "(", "v", ",", "(", "datetime", ".", "datetime", ",", "datetime", ".", "date", ")", ")", ":", "d", "[", "k", "]", "=", "tz", ".", "localize", "(", "v", ")", "else", ":", "d", "[", "k", "]", "=", "v", "return", "d" ]
r"""Normalize field values by stripping whitespace from strings, localizing datetimes to a timezone, etc >>> (sorted(clean_field_dict({'_state': object(), 'x': 1, 'y': "\t Wash Me! \n" }).items()) == ... [('x', 1), ('y', 'Wash Me!')]) True
[ "r", "Normalize", "field", "values", "by", "stripping", "whitespace", "from", "strings", "localizing", "datetimes", "to", "a", "timezone", "etc" ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L241-L260
train
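A minimal usage sketch (assumes the pugnlp package is installed and that the module-level DEFAULT_TZ is a pytz-style timezone with a .localize() method, as the call above implies):

import datetime
from pugnlp.util import clean_field_dict

record = {'_state': object(), 'name': '  Ada  ', 'born': datetime.datetime(1815, 12, 10)}
cleaned = clean_field_dict(record)
print(cleaned['name'])  # 'Ada' -- whitespace stripped, '_state' dropped
print(cleaned['born'])  # the datetime localized to DEFAULT_TZ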
totalgood/pugnlp
src/pugnlp/util.py
generate_tuple_batches
python
def generate_tuple_batches(qs, batch_len=1):
    """Iterate through a queryset in batches of length `batch_len`

    >>> [batch for batch in generate_tuple_batches(range(7), 3)]
    [(0, 1, 2), (3, 4, 5), (6,)]
    """
    num_items, batch = 0, []
    for item in qs:
        if num_items >= batch_len:
            yield tuple(batch)
            num_items = 0
            batch = []
        num_items += 1
        batch += [item]
    if num_items:
        yield tuple(batch)
[ "def", "generate_tuple_batches", "(", "qs", ",", "batch_len", "=", "1", ")", ":", "num_items", ",", "batch", "=", "0", ",", "[", "]", "for", "item", "in", "qs", ":", "if", "num_items", ">=", "batch_len", ":", "yield", "tuple", "(", "batch", ")", "num_items", "=", "0", "batch", "=", "[", "]", "num_items", "+=", "1", "batch", "+=", "[", "item", "]", "if", "num_items", ":", "yield", "tuple", "(", "batch", ")" ]
Iterate through a queryset in batches of length `batch_len` >>> [batch for batch in generate_tuple_batches(range(7), 3)] [(0, 1, 2), (3, 4, 5), (6,)]
[ "Iterate", "through", "a", "queryset", "in", "batches", "of", "length", "batch_len" ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L491-L506
train
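For readers who want the same batching without the hand-rolled counter, a standalone sketch using itertools.islice (an alternative formulation, not pugnlp's API):

import itertools

def batched(iterable, n):
    # Pull n items at a time from a single shared iterator
    it = iter(iterable)
    while True:
        chunk = tuple(itertools.islice(it, n))
        if not chunk:
            return
        yield chunk

print(list(batched(range(7), 3)))  # [(0, 1, 2), (3, 4, 5), (6,)]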
totalgood/pugnlp
src/pugnlp/util.py
find_count_label
python
def find_count_label(d):
    """Find the member of a set that means "count" or "frequency" or "probability" or "number of occurrences".
    """
    for name in COUNT_NAMES:
        if name in d:
            return name
    for name in COUNT_NAMES:
        # Retry case-insensitively and return the lowercased label that is actually in `d`
        if str(name).lower() in d:
            return str(name).lower()
[ "def", "find_count_label", "(", "d", ")", ":", "for", "name", "in", "COUNT_NAMES", ":", "if", "name", "in", "d", ":", "return", "name", "for", "name", "in", "COUNT_NAMES", ":", "if", "str", "(", "name", ")", ".", "lower", "(", ")", "in", "d", ":", "return", "name" ]
Find the member of a set that means "count" or "frequency" or "probability" or "number of occurrences".
[ "Find", "the", "member", "of", "a", "set", "that", "means", "count", "or", "frequency", "or", "probability", "or", "number", "of", "occurrences", "." ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L569-L578
train
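A minimal usage sketch (COUNT_NAMES is defined elsewhere in the module and not shown in this excerpt; the sketch assumes it contains a common label such as 'count'):

from pugnlp.util import find_count_label  # assumes the pugnlp package is installed

columns = {'word': ['a', 'b'], 'count': [10, 3]}
print(find_count_label(columns))   # 'count', provided 'count' is in COUNT_NAMES
print(find_count_label({'x': 1}))  # None when no count-like label is found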
totalgood/pugnlp
src/pugnlp/util.py
fuzzy_get_value
python
def fuzzy_get_value(obj, approximate_key, default=None, **kwargs):
    """Like fuzzy_get, but assume the obj is dict-like and return the value without the key

    Notes:
        Argument order is in reverse order relative to `fuzzywuzzy.process.extractOne()`
        but in the same order as the get(self, key) method on dicts

    Arguments:
        obj (dict-like): object to run the get method on using the key that is most similar to one within the dict
        approximate_key (str): key to look for a fuzzy match within the dict keys
        default (obj): the value to return if a similar key cannot be found in the `possible_keys`
        similarity (float): fractional similarity between the approximate_key and the dict key
            (0.9 means 90% of characters must be identical)
        tuple_joiner (str): Character to use as delimiter/joiner between tuple elements.
            Used to create keys from any tuples so fuzzywuzzy string matching can be used on them.
        key_and_value (bool): Whether to return both the key and its value (True) or just the value (False).
            Default is the same behavior as dict.get (i.e. key_and_value=False)
        dict_keys (list of str): if you already have a set of keys to search, this will save this
            function a little time and RAM

    Examples:
        >>> fuzzy_get_value({'seller': 2.7, 'sailor': set('e')}, 'sail') == set(['e'])
        True
        >>> fuzzy_get_value({'seller': 2.7, 'sailor': set('e'), 'camera': object()}, 'SLR')
        2.7
        >>> fuzzy_get_value({'seller': 2.7, 'sailor': set('e'), 'camera': object()}, 'I') == set(['e'])
        True
        >>> fuzzy_get_value({'word': tuple('word'), 'noun': tuple('noun')}, 'woh!', similarity=.3)
        ('w', 'o', 'r', 'd')
        >>> df = pd.DataFrame(np.arange(6*2).reshape(2,6), columns=('alpha','beta','omega','begin','life','end'))
        >>> fuzzy_get_value(df, 'life')[0], fuzzy_get(df, 'omega')[0]
        (4, 2)
    """
    dict_obj = OrderedDict(obj)
    # An integer-like approximate_key is treated as a positional index into the keys
    try:
        return dict_obj[list(dict_obj.keys())[int(approximate_key)]]
    except (ValueError, IndexError):
        pass
    return fuzzy_get(dict_obj, approximate_key, key_and_value=False, **kwargs)
[ "def", "fuzzy_get_value", "(", "obj", ",", "approximate_key", ",", "default", "=", "None", ",", "*", "*", "kwargs", ")", ":", "dict_obj", "=", "OrderedDict", "(", "obj", ")", "try", ":", "return", "dict_obj", "[", "list", "(", "dict_obj", ".", "keys", "(", ")", ")", "[", "int", "(", "approximate_key", ")", "]", "]", "except", "(", "ValueError", ",", "IndexError", ")", ":", "pass", "return", "fuzzy_get", "(", "dict_obj", ",", "approximate_key", ",", "key_and_value", "=", "False", ",", "*", "*", "kwargs", ")" ]
Like fuzzy_get, but assume the obj is dict-like and return the value without the key Notes: Argument order is in reverse order relative to `fuzzywuzzy.process.extractOne()` but in the same order as get(self, key) method on dicts Arguments: obj (dict-like): object to run the get method on using the key that is most similar to one within the dict approximate_key (str): key to look for a fuzzy match within the dict keys default (obj): the value to return if a similar key cannote be found in the `possible_keys` similarity (str): fractional similiarity between the approximate_key and the dict key (0.9 means 90% of characters must be identical) tuple_joiner (str): Character to use as delimitter/joiner between tuple elements. Used to create keys of any tuples to be able to use fuzzywuzzy string matching on it. key_and_value (bool): Whether to return both the key and its value (True) or just the value (False). Default is the same behavior as dict.get (i.e. key_and_value=False) dict_keys (list of str): if you already have a set of keys to search, this will save this funciton a little time and RAM Examples: >>> fuzzy_get_value({'seller': 2.7, 'sailor': set('e')}, 'sail') == set(['e']) True >>> fuzzy_get_value({'seller': 2.7, 'sailor': set('e'), 'camera': object()}, 'SLR') 2.7 >>> fuzzy_get_value({'seller': 2.7, 'sailor': set('e'), 'camera': object()}, 'I') == set(['e']) True >>> fuzzy_get_value({'word': tuple('word'), 'noun': tuple('noun')}, 'woh!', similarity=.3) ('w', 'o', 'r', 'd') >>> df = pd.DataFrame(np.arange(6*2).reshape(2,6), columns=('alpha','beta','omega','begin','life','end')) >>> fuzzy_get_value(df, 'life')[0], fuzzy_get(df, 'omega')[0] (4, 2)
[ "Like", "fuzzy_get", "but", "assume", "the", "obj", "is", "dict", "-", "like", "and", "return", "the", "value", "without", "the", "key" ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L732-L770
train
totalgood/pugnlp
src/pugnlp/util.py
joined_seq
python
def joined_seq(seq, sep=None):
    r"""Join a sequence into a tuple or a concatenated string

    >>> joined_seq(range(3), ', ')
    '0, 1, 2'
    >>> joined_seq([1, 2, 3])
    (1, 2, 3)
    """
    joined_seq = tuple(seq)
    if isinstance(sep, basestring):
        joined_seq = sep.join(str(item) for item in joined_seq)
    return joined_seq
[ "def", "joined_seq", "(", "seq", ",", "sep", "=", "None", ")", ":", "joined_seq", "=", "tuple", "(", "seq", ")", "if", "isinstance", "(", "sep", ",", "basestring", ")", ":", "joined_seq", "=", "sep", ".", "join", "(", "str", "(", "item", ")", "for", "item", "in", "joined_seq", ")", "return", "joined_seq" ]
r"""Join a sequence into a tuple or a concatenated string >>> joined_seq(range(3), ', ') '0, 1, 2' >>> joined_seq([1, 2, 3]) (1, 2, 3)
[ "r", "Join", "a", "sequence", "into", "a", "tuple", "or", "a", "concatenated", "string" ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L811-L822
train
totalgood/pugnlp
src/pugnlp/util.py
dos_from_table
python
def dos_from_table(table, header=None):
    """Produce dictionary of sequences from sequence of sequences, optionally with a header "row".

    >>> dos_from_table([['hello', 'world'], [1, 2], [3,4]]) == {'hello': [1, 3], 'world': [2, 4]}
    True
    """
    start_row = 0
    if not table:
        return table
    if not header:
        header = table[0]
        start_row = 1
    header_list = header
    # A string header is split on the first delimiter that yields one label per column
    if header and isinstance(header, basestring):
        header_list = header.split('\t')
        if len(header_list) != len(table[0]):
            header_list = header.split(',')
        if len(header_list) != len(table[0]):
            header_list = header.split(' ')
    ans = {}
    for i, k in enumerate(header_list):
        ans[k] = [row[i] for row in table[start_row:]]
    return ans
[ "def", "dos_from_table", "(", "table", ",", "header", "=", "None", ")", ":", "start_row", "=", "0", "if", "not", "table", ":", "return", "table", "if", "not", "header", ":", "header", "=", "table", "[", "0", "]", "start_row", "=", "1", "header_list", "=", "header", "if", "header", "and", "isinstance", "(", "header", ",", "basestring", ")", ":", "header_list", "=", "header", ".", "split", "(", "'\\t'", ")", "if", "len", "(", "header_list", ")", "!=", "len", "(", "table", "[", "0", "]", ")", ":", "header_list", "=", "header", ".", "split", "(", "','", ")", "if", "len", "(", "header_list", ")", "!=", "len", "(", "table", "[", "0", "]", ")", ":", "header_list", "=", "header", ".", "split", "(", "' '", ")", "ans", "=", "{", "}", "for", "i", ",", "k", "in", "enumerate", "(", "header", ")", ":", "ans", "[", "k", "]", "=", "[", "row", "[", "i", "]", "for", "row", "in", "table", "[", "start_row", ":", "]", "]", "return", "ans" ]
Produce dictionary of sequences from sequence of sequences, optionally with a header "row". >>> dos_from_table([['hello', 'world'], [1, 2], [3,4]]) == {'hello': [1, 3], 'world': [2, 4]} True
[ "Produce", "dictionary", "of", "sequences", "from", "sequence", "of", "sequences", "optionally", "with", "a", "header", "row", "." ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L847-L869
train
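A minimal usage sketch of the string-header path, which exercises the delimiter fallback (illustrative data; assumes the pugnlp package is installed):

from pugnlp.util import dos_from_table

table = [[1, 2], [3, 4]]
# The tab split yields one label, so the function falls back to splitting on ','
print(dos_from_table(table, header='hello,world'))
# {'hello': [1, 3], 'world': [2, 4]}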
totalgood/pugnlp
src/pugnlp/util.py
transposed_lists
python
def transposed_lists(list_of_lists, default=None):
    """Like `numpy.transposed`, but allows uneven row lengths

    Uneven lengths will affect the order of the elements in the rows of the transposed lists

    >>> transposed_lists([[1, 2], [3, 4, 5], [6]])
    [[1, 3, 6], [2, 4], [5]]
    >>> transposed_lists(transposed_lists([[], [1, 2, 3], [4]]))
    [[1, 2, 3], [4]]
    >>> x = transposed_lists([range(4), [4, 5]])
    >>> x
    [[0, 4], [1, 5], [2], [3]]
    >>> transposed_lists(x)
    [[0, 1, 2, 3], [4, 5]]
    """
    # Compare by equality, not identity: `default is []` and `default is tuple()` are never True
    if default is None or default == [] or default == tuple():
        default = []
    elif default == 'None':
        default = [None]
    else:
        default = [default]

    N = len(list_of_lists)
    Ms = [len(row) for row in list_of_lists]
    M = max(Ms)
    ans = []
    for j in range(M):
        ans += [[]]
        for i in range(N):
            if j < Ms[i]:
                ans[-1] += [list_of_lists[i][j]]
            else:
                ans[-1] += list(default)
    return ans
[ "def", "transposed_lists", "(", "list_of_lists", ",", "default", "=", "None", ")", ":", "if", "default", "is", "None", "or", "default", "is", "[", "]", "or", "default", "is", "tuple", "(", ")", ":", "default", "=", "[", "]", "elif", "default", "is", "'None'", ":", "default", "=", "[", "None", "]", "else", ":", "default", "=", "[", "default", "]", "N", "=", "len", "(", "list_of_lists", ")", "Ms", "=", "[", "len", "(", "row", ")", "for", "row", "in", "list_of_lists", "]", "M", "=", "max", "(", "Ms", ")", "ans", "=", "[", "]", "for", "j", "in", "range", "(", "M", ")", ":", "ans", "+=", "[", "[", "]", "]", "for", "i", "in", "range", "(", "N", ")", ":", "if", "j", "<", "Ms", "[", "i", "]", ":", "ans", "[", "-", "1", "]", "+=", "[", "list_of_lists", "[", "i", "]", "[", "j", "]", "]", "else", ":", "ans", "[", "-", "1", "]", "+=", "list", "(", "default", ")", "return", "ans" ]
Like `numpy.transposed`, but allows uneven row lengths Uneven lengths will affect the order of the elements in the rows of the transposed lists >>> transposed_lists([[1, 2], [3, 4, 5], [6]]) [[1, 3, 6], [2, 4], [5]] >>> transposed_lists(transposed_lists([[], [1, 2, 3], [4]])) [[1, 2, 3], [4]] >>> x = transposed_lists([range(4),[4,5]]) >>> x [[0, 4], [1, 5], [2], [3]] >>> transposed_lists(x) [[0, 1, 2, 3], [4, 5]]
[ "Like", "numpy", ".", "transposed", "but", "allows", "uneven", "row", "lengths" ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L872-L905
train
totalgood/pugnlp
src/pugnlp/util.py
hist_from_counts
python
def hist_from_counts(counts, normalize=False, cumulative=False, to_str=False,
                     sep=',', min_bin=None, max_bin=None):
    """Compute an empirical histogram, PMF or CDF in a list of lists

    TESTME: compare results to hist_from_values_list and hist_from_float_values_list
    """
    counters = [dict((i, c) for i, c in enumerate(counts))]
    intkeys_list = [[c for c in counts_dict
                     if (isinstance(c, int) or (isinstance(c, float) and int(c) == c))]
                    for counts_dict in counters]
    min_bin, max_bin = min_bin or 0, max_bin or len(counts) - 1

    histograms = []
    for intkeys, counts in zip(intkeys_list, counters):
        histograms += [OrderedDict()]
        if not intkeys:
            continue
        if normalize:
            N = sum(counts[c] for c in intkeys)
            for c in intkeys:
                counts[c] = float(counts[c]) / N
        if cumulative:
            for i in range(min_bin, max_bin + 1):
                histograms[-1][i] = counts.get(i, 0) + histograms[-1].get(i - 1, 0)
        else:
            for i in range(min_bin, max_bin + 1):
                histograms[-1][i] = counts.get(i, 0)
    if not histograms:
        histograms = [OrderedDict()]

    # fill in the zero counts between the integer bins of the histogram
    aligned_histograms = []
    for i in range(min_bin, max_bin + 1):
        aligned_histograms += [tuple([i] + [hist.get(i, 0) for hist in histograms])]

    if to_str:
        # FIXME: add header row
        return str_from_table(aligned_histograms, sep=sep, max_rows=365 * 2 + 1)
    return aligned_histograms
[ "def", "hist_from_counts", "(", "counts", ",", "normalize", "=", "False", ",", "cumulative", "=", "False", ",", "to_str", "=", "False", ",", "sep", "=", "','", ",", "min_bin", "=", "None", ",", "max_bin", "=", "None", ")", ":", "counters", "=", "[", "dict", "(", "(", "i", ",", "c", ")", "for", "i", ",", "c", "in", "enumerate", "(", "counts", ")", ")", "]", "intkeys_list", "=", "[", "[", "c", "for", "c", "in", "counts_dict", "if", "(", "isinstance", "(", "c", ",", "int", ")", "or", "(", "isinstance", "(", "c", ",", "float", ")", "and", "int", "(", "c", ")", "==", "c", ")", ")", "]", "for", "counts_dict", "in", "counters", "]", "min_bin", ",", "max_bin", "=", "min_bin", "or", "0", ",", "max_bin", "or", "len", "(", "counts", ")", "-", "1", "histograms", "=", "[", "]", "for", "intkeys", ",", "counts", "in", "zip", "(", "intkeys_list", ",", "counters", ")", ":", "histograms", "+=", "[", "OrderedDict", "(", ")", "]", "if", "not", "intkeys", ":", "continue", "if", "normalize", ":", "N", "=", "sum", "(", "counts", "[", "c", "]", "for", "c", "in", "intkeys", ")", "for", "c", "in", "intkeys", ":", "counts", "[", "c", "]", "=", "float", "(", "counts", "[", "c", "]", ")", "/", "N", "if", "cumulative", ":", "for", "i", "in", "range", "(", "min_bin", ",", "max_bin", "+", "1", ")", ":", "histograms", "[", "-", "1", "]", "[", "i", "]", "=", "counts", ".", "get", "(", "i", ",", "0", ")", "+", "histograms", "[", "-", "1", "]", ".", "get", "(", "i", "-", "1", ",", "0", ")", "else", ":", "for", "i", "in", "range", "(", "min_bin", ",", "max_bin", "+", "1", ")", ":", "histograms", "[", "-", "1", "]", "[", "i", "]", "=", "counts", ".", "get", "(", "i", ",", "0", ")", "if", "not", "histograms", ":", "histograms", "=", "[", "OrderedDict", "(", ")", "]", "# fill in the zero counts between the integer bins of the histogram", "aligned_histograms", "=", "[", "]", "for", "i", "in", "range", "(", "min_bin", ",", "max_bin", "+", "1", ")", ":", "aligned_histograms", "+=", "[", "tuple", "(", "[", "i", "]", "+", "[", "hist", ".", "get", "(", "i", ",", "0", ")", "for", "hist", "in", "histograms", "]", ")", "]", "if", "to_str", ":", "# FIXME: add header row", "return", "str_from_table", "(", "aligned_histograms", ",", "sep", "=", "sep", ",", "max_rows", "=", "365", "*", "2", "+", "1", ")", "return", "aligned_histograms" ]
Compute an emprical histogram, PMF or CDF in a list of lists TESTME: compare results to hist_from_values_list and hist_from_float_values_list
[ "Compute", "an", "emprical", "histogram", "PMF", "or", "CDF", "in", "a", "list", "of", "lists" ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L972-L1011
train
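Since the docstring carries a TESTME note instead of examples, a minimal usage sketch (illustrative counts; assumes the pugnlp package is installed):

from pugnlp.util import hist_from_counts

# counts[i] is the count for bin i
print(hist_from_counts([2, 0, 1]))
# [(0, 2), (1, 0), (2, 1)]
print(hist_from_counts([2, 0, 1], cumulative=True))
# [(0, 2), (1, 2), (2, 3)]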
totalgood/pugnlp
src/pugnlp/util.py
get_similar
python
def get_similar(obj, labels, default=None, min_similarity=0.5):
    """Similar to fuzzy_get, but allows non-string keys and a list of possible keys

    Searches attributes in addition to keys and indexes to find the closest match.

    See Also:
        `fuzzy_get`
    """
    raise NotImplementedError(
        "Unfinished implementation, needs to be in fuzzy_get where list of scores & keywords is sorted.")
    labels = listify(labels)

    def not_found(*args, **kwargs):
        return 0

    min_score = int(min_similarity * 100)
    for similarity_score in [100, 95, 90, 80, 70, 50, 30, 10, 5, 0]:
        if similarity_score <= min_score:
            similarity_score = min_score
        for label in labels:
            try:
                result = obj.get(label, not_found)
            except AttributeError:
                try:
                    result = obj.__getitem__(label)
                except (IndexError, TypeError):
                    result = not_found
            if result is not not_found:
                return result
        if similarity_score == min_score:
            if result is not not_found:
                return result
[ "def", "get_similar", "(", "obj", ",", "labels", ",", "default", "=", "None", ",", "min_similarity", "=", "0.5", ")", ":", "raise", "NotImplementedError", "(", "\"Unfinished implementation, needs to be in fuzzy_get where list of scores & keywords is sorted.\"", ")", "labels", "=", "listify", "(", "labels", ")", "def", "not_found", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "0", "min_score", "=", "int", "(", "min_similarity", "*", "100", ")", "for", "similarity_score", "in", "[", "100", ",", "95", ",", "90", ",", "80", ",", "70", ",", "50", ",", "30", ",", "10", ",", "5", ",", "0", "]", ":", "if", "similarity_score", "<=", "min_score", ":", "similarity_score", "=", "min_score", "for", "label", "in", "labels", ":", "try", ":", "result", "=", "obj", ".", "get", "(", "label", ",", "not_found", ")", "except", "AttributeError", ":", "try", ":", "result", "=", "obj", ".", "__getitem__", "(", "label", ")", "except", "(", "IndexError", ",", "TypeError", ")", ":", "result", "=", "not_found", "if", "result", "is", "not", "not_found", ":", "return", "result", "if", "similarity_score", "==", "min_score", ":", "if", "result", "is", "not", "not_found", ":", "return", "result" ]
Similar to fuzzy_get, but allows non-string keys and a list of possible keys Searches attributes in addition to keys and indexes to find the closest match. See Also: `fuzzy_get`
[ "Similar", "to", "fuzzy_get", "but", "allows", "non", "-", "string", "keys", "and", "a", "list", "of", "possible", "keys" ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L1094-L1126
train
totalgood/pugnlp
src/pugnlp/util.py
update_file_ext
python
def update_file_ext(filename, ext='txt', sep='.'):
    r"""Force the file or path str to end with the indicated extension

    Note: a dot (".") is assumed to delimit the extension

    >>> from __future__ import unicode_literals
    >>> update_file_ext('/home/hobs/extremofile', 'bac')
    '/home/hobs/extremofile.bac'
    >>> update_file_ext('/home/hobs/piano.file/', 'music')
    '/home/hobs/piano.file/.music'
    >>> update_file_ext('/home/ninja.hobs/Anglofile', '.uk')
    '/home/ninja.hobs/Anglofile.uk'
    >>> update_file_ext('/home/ninja-corsi/audio', 'file', sep='-')
    '/home/ninja-corsi/audio-file'
    """
    path, filename = os.path.split(filename)
    if ext and ext[0] == sep:
        ext = ext[1:]
    return os.path.join(path, sep.join(filename.split(sep)[:-1 if filename.count(sep) > 1 else 1] + [ext]))
[ "def", "update_file_ext", "(", "filename", ",", "ext", "=", "'txt'", ",", "sep", "=", "'.'", ")", ":", "path", ",", "filename", "=", "os", ".", "path", ".", "split", "(", "filename", ")", "if", "ext", "and", "ext", "[", "0", "]", "==", "sep", ":", "ext", "=", "ext", "[", "1", ":", "]", "return", "os", ".", "path", ".", "join", "(", "path", ",", "sep", ".", "join", "(", "filename", ".", "split", "(", "sep", ")", "[", ":", "-", "1", "if", "filename", ".", "count", "(", "sep", ")", ">", "1", "else", "1", "]", "+", "[", "ext", "]", ")", ")" ]
r"""Force the file or path str to end with the indicated extension Note: a dot (".") is assumed to delimit the extension >>> from __future__ import unicode_literals >>> update_file_ext('/home/hobs/extremofile', 'bac') '/home/hobs/extremofile.bac' >>> update_file_ext('/home/hobs/piano.file/', 'music') '/home/hobs/piano.file/.music' >>> update_file_ext('/home/ninja.hobs/Anglofile', '.uk') '/home/ninja.hobs/Anglofile.uk' >>> update_file_ext('/home/ninja-corsi/audio', 'file', sep='-') '/home/ninja-corsi/audio-file'
[ "r", "Force", "the", "file", "or", "path", "str", "to", "end", "with", "the", "indicated", "extension" ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L1328-L1347
train
totalgood/pugnlp
src/pugnlp/util.py
transcode
python
def transcode(infile, outfile=None, incoding="shift-jis", outcoding="utf-8"):
    """Change encoding of text file"""
    if not outfile:
        outfile = os.path.basename(infile) + '.utf8'
    with codecs.open(infile, "rb", incoding) as fpin:
        with codecs.open(outfile, "wb", outcoding) as fpout:
            fpout.write(fpin.read())
[ "def", "transcode", "(", "infile", ",", "outfile", "=", "None", ",", "incoding", "=", "\"shift-jis\"", ",", "outcoding", "=", "\"utf-8\"", ")", ":", "if", "not", "outfile", ":", "outfile", "=", "os", ".", "path", ".", "basename", "(", "infile", ")", "+", "'.utf8'", "with", "codecs", ".", "open", "(", "infile", ",", "\"rb\"", ",", "incoding", ")", "as", "fpin", ":", "with", "codecs", ".", "open", "(", "outfile", ",", "\"wb\"", ",", "outcoding", ")", "as", "fpout", ":", "fpout", ".", "write", "(", "fpin", ".", "read", "(", ")", ")" ]
Change encoding of text file
[ "Change", "encoding", "of", "text", "file" ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L1404-L1410
train
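A minimal usage sketch (file names are hypothetical; note the function reads the whole file into memory, so it is best suited to files that fit in RAM):

from pugnlp.util import transcode  # assumes the pugnlp package is installed

# Convert a Shift-JIS text file to UTF-8 alongside the original
transcode('menu_sjis.txt', 'menu_utf8.txt')
# The default output name appends '.utf8' to the input's basename:
transcode('menu_sjis.txt')  # writes ./menu_sjis.txt.utf8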
totalgood/pugnlp
src/pugnlp/util.py
dict2obj
python
def dict2obj(d):
    """Convert a dict to an object or namespace

    >>> d = {'a': 1, 'b': {'c': 2}, 'd': ["hi", {'foo': "bar"}]}
    >>> obj = dict2obj(d)
    >>> obj.b.c
    2
    >>> obj.d
    ['hi', {'foo': 'bar'}]
    >>> d = {'a': 1, 'b': {'c': 2}, 'd': [("hi", {'foo': "bar"})]}
    >>> obj = dict2obj(d)
    >>> obj.d.hi.foo
    'bar'
    """
    if isinstance(d, (Mapping, list, tuple)):
        try:
            d = dict(d)
        except (ValueError, TypeError):
            return d
    else:
        return d
    obj = Object()
    for k, v in viewitems(d):
        obj.__dict__[k] = dict2obj(v)
    return obj
[ "def", "dict2obj", "(", "d", ")", ":", "if", "isinstance", "(", "d", ",", "(", "Mapping", ",", "list", ",", "tuple", ")", ")", ":", "try", ":", "d", "=", "dict", "(", "d", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "return", "d", "else", ":", "return", "d", "obj", "=", "Object", "(", ")", "for", "k", ",", "v", "in", "viewitems", "(", "d", ")", ":", "obj", ".", "__dict__", "[", "k", "]", "=", "dict2obj", "(", "v", ")", "return", "obj" ]
Convert a dict to an object or namespace >>> d = {'a': 1, 'b': {'c': 2}, 'd': ["hi", {'foo': "bar"}]} >>> obj = dict2obj(d) >>> obj.b.c 2 >>> obj.d ['hi', {'foo': 'bar'}] >>> d = {'a': 1, 'b': {'c': 2}, 'd': [("hi", {'foo': "bar"})]} >>> obj = dict2obj(d) >>> obj.d.hi.foo 'bar'
[ "Convert", "a", "dict", "to", "an", "object", "or", "namespace" ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L1602-L1627
train
totalgood/pugnlp
src/pugnlp/util.py
int_pair
python
def int_pair(s, default=(0, None)):
    """Return the digits to either side of a single non-digit character as a 2-tuple of integers

    >>> int_pair('90210-007')
    (90210, 7)
    >>> int_pair('04321.0123')
    (4321, 123)
    """
    s = re.split(r'[^0-9]+', str(s).strip())
    if len(s) and len(s[0]):
        if len(s) > 1 and len(s[1]):
            return (int(s[0]), int(s[1]))
        return (int(s[0]), default[1])
    return default
[ "def", "int_pair", "(", "s", ",", "default", "=", "(", "0", ",", "None", ")", ")", ":", "s", "=", "re", ".", "split", "(", "r'[^0-9]+'", ",", "str", "(", "s", ")", ".", "strip", "(", ")", ")", "if", "len", "(", "s", ")", "and", "len", "(", "s", "[", "0", "]", ")", ":", "if", "len", "(", "s", ")", ">", "1", "and", "len", "(", "s", "[", "1", "]", ")", ":", "return", "(", "int", "(", "s", "[", "0", "]", ")", ",", "int", "(", "s", "[", "1", "]", ")", ")", "return", "(", "int", "(", "s", "[", "0", "]", ")", ",", "default", "[", "1", "]", ")", "return", "default" ]
Return the digits to either side of a single non-digit character as a 2-tuple of integers >>> int_pair('90210-007') (90210, 7) >>> int_pair('04321.0123') (4321, 123)
[ "Return", "the", "digits", "to", "either", "side", "of", "a", "single", "non", "-", "digit", "character", "as", "a", "2", "-", "tuple", "of", "integers" ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L1859-L1872
train
totalgood/pugnlp
src/pugnlp/util.py
make_float
python
def make_float(s, default='', ignore_commas=True):
    r"""Coerce a string into a float

    >>> make_float('12,345')
    12345.0
    >>> make_float('12.345')
    12.345
    >>> make_float('1+2')
    3.0
    >>> make_float('+42.0')
    42.0
    >>> make_float('\r\n-42?\r\n')
    -42.0
    >>> make_float('$42.42')
    42.42
    >>> make_float('B-52')
    -52.0
    >>> make_float('1.2 x 10^34')
    1.2e+34
    >>> make_float(float('nan'))
    nan
    >>> make_float(float('-INF'))
    -inf
    """
    if ignore_commas and isinstance(s, basestring):
        s = s.replace(',', '')
    try:
        return float(s)
    except (IndexError, ValueError, AttributeError, TypeError):
        try:
            return float(str(s))
        except ValueError:
            try:
                return float(normalize_scientific_notation(str(s), ignore_commas))
            except ValueError:
                try:
                    return float(first_digits(s))
                except ValueError:
                    return default
[ "def", "make_float", "(", "s", ",", "default", "=", "''", ",", "ignore_commas", "=", "True", ")", ":", "if", "ignore_commas", "and", "isinstance", "(", "s", ",", "basestring", ")", ":", "s", "=", "s", ".", "replace", "(", "','", ",", "''", ")", "try", ":", "return", "float", "(", "s", ")", "except", "(", "IndexError", ",", "ValueError", ",", "AttributeError", ",", "TypeError", ")", ":", "try", ":", "return", "float", "(", "str", "(", "s", ")", ")", "except", "ValueError", ":", "try", ":", "return", "float", "(", "normalize_scientific_notation", "(", "str", "(", "s", ")", ",", "ignore_commas", ")", ")", "except", "ValueError", ":", "try", ":", "return", "float", "(", "first_digits", "(", "s", ")", ")", "except", "ValueError", ":", "return", "default" ]
r"""Coerce a string into a float >>> make_float('12,345') 12345.0 >>> make_float('12.345') 12.345 >>> make_float('1+2') 3.0 >>> make_float('+42.0') 42.0 >>> make_float('\r\n-42?\r\n') -42.0 >>> make_float('$42.42') 42.42 >>> make_float('B-52') -52.0 >>> make_float('1.2 x 10^34') 1.2e+34 >>> make_float(float('nan')) nan >>> make_float(float('-INF')) -inf
[ "r", "Coerce", "a", "string", "into", "a", "float" ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L1903-L1941
train
totalgood/pugnlp
src/pugnlp/util.py
normalize_names
python
def normalize_names(names):
    """Coerce a string or nested list of strings into a flat list of strings."""
    if isinstance(names, basestring):
        names = names.split(',')
    names = listify(names)
    return [str(name).strip() for name in names]
[ "def", "normalize_names", "(", "names", ")", ":", "if", "isinstance", "(", "names", ",", "basestring", ")", ":", "names", "=", "names", ".", "split", "(", "','", ")", "names", "=", "listify", "(", "names", ")", "return", "[", "str", "(", "name", ")", ".", "strip", "(", ")", "for", "name", "in", "names", "]" ]
Coerce a string or nested list of strings into a flat list of strings.
[ "Coerce", "a", "string", "or", "nested", "list", "of", "strings", "into", "a", "flat", "list", "of", "strings", "." ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L2023-L2028
train
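A minimal usage sketch (assumes the pugnlp package is installed; the exact flattening behavior of the module's listify helper is inferred from its name, not shown in this excerpt):

from pugnlp.util import normalize_names

print(normalize_names('alpha, beta ,gamma'))
# ['alpha', 'beta', 'gamma']
print(normalize_names(['alpha ', ' beta']))
# ['alpha', 'beta']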
totalgood/pugnlp
src/pugnlp/util.py
normalize_serial_number
python
def normalize_serial_number(sn, max_length=None, left_fill='0', right_fill=str(), blank=str(),
                            valid_chars=' -0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ',
                            invalid_chars=None, strip_whitespace=True, join=False, na=rex.nones):
    r"""Make a string compatible with typical serial number requirements

    # Default configuration strips external whitespace and retains only the last 10 characters
    >>> normalize_serial_number('1C 234567890 ')
    '0234567890'
    >>> normalize_serial_number('1C 234567890 ', max_length=20)
    '000000001C 234567890'
    >>> normalize_serial_number('Unknown', blank=None, left_fill=str())
    ''
    >>> normalize_serial_number('N/A', blank='', left_fill=str())
    'A'
    >>> normalize_serial_number('1C 234567890 ', max_length=20, left_fill='')
    '1C 234567890'

    Notice how the max_length setting (20) carries over from the previous test!

    >>> len(normalize_serial_number('Unknown', blank=False))
    20
    >>> normalize_serial_number('Unknown', blank=False)
    '00000000000000000000'
    >>> normalize_serial_number(' \t1C\t-\t234567890 \x00\x7f', max_length=14, left_fill='0',
    ...                         valid_chars='0123456789ABC', invalid_chars=None, join=True)
    '1C\t-\t234567890'

    Notice how the max_length setting carries over from the previous test!

    >>> len(normalize_serial_number('Unknown', blank=False))
    14

    Restore the default max_length setting

    >>> len(normalize_serial_number('Unknown', blank=False, max_length=10))
    10
    >>> normalize_serial_number('NO SERIAL', blank='--=--', left_fill='')  # doctest: +NORMALIZE_WHITESPACE
    'NO SERIAL'
    >>> normalize_serial_number('NO SERIAL', blank='', left_fill='')  # doctest: +NORMALIZE_WHITESPACE
    'NO SERIAL'
    >>> normalize_serial_number('1C 234567890 ', valid_chars='0123456789')
    '0234567890'
    """
    # All 9 kwargs have persistent default values stored as attributes of the function instance
    if max_length is None:
        max_length = normalize_serial_number.max_length
    else:
        normalize_serial_number.max_length = max_length
    if left_fill is None:
        left_fill = normalize_serial_number.left_fill
    else:
        normalize_serial_number.left_fill = left_fill
    if right_fill is None:
        right_fill = normalize_serial_number.right_fill
    else:
        normalize_serial_number.right_fill = right_fill
    if blank is None:
        blank = normalize_serial_number.blank
    else:
        normalize_serial_number.blank = blank
    if valid_chars is None:
        valid_chars = normalize_serial_number.valid_chars
    else:
        normalize_serial_number.valid_chars = valid_chars
    if invalid_chars is None:
        invalid_chars = normalize_serial_number.invalid_chars
    else:
        normalize_serial_number.invalid_chars = invalid_chars
    if strip_whitespace is None:
        strip_whitespace = normalize_serial_number.strip_whitespace
    else:
        normalize_serial_number.strip_whitespace = strip_whitespace
    if join is None:
        join = normalize_serial_number.join
    else:
        normalize_serial_number.join = join
    if na is None:
        na = normalize_serial_number.na
    else:
        normalize_serial_number.na = na

    if invalid_chars is None:
        invalid_chars = (c for c in charlist.ascii_all if c not in valid_chars)
    invalid_chars = ''.join(invalid_chars)

    sn = str(sn).strip(invalid_chars)
    if strip_whitespace:
        sn = sn.strip()
    if invalid_chars:
        if join:
            sn = sn.translate(dict(zip(invalid_chars, [''] * len(invalid_chars))))
        else:
            sn = multisplit(sn, invalid_chars)[-1]
    sn = sn[-max_length:]
    if strip_whitespace:
        sn = sn.strip()
    if na:
        if isinstance(na, (tuple, set, dict, list)) and sn in na:
            sn = ''
        elif na.match(sn):
            sn = ''
    if not sn and not (blank is False):
        return blank
    if left_fill:
        sn = left_fill * int(max_length - len(sn) / len(left_fill)) + sn
    if right_fill:
        sn = sn + right_fill * int(max_length - len(sn) / len(right_fill))
    return sn
[ "def", "normalize_serial_number", "(", "sn", ",", "max_length", "=", "None", ",", "left_fill", "=", "'0'", ",", "right_fill", "=", "str", "(", ")", ",", "blank", "=", "str", "(", ")", ",", "valid_chars", "=", "' -0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'", ",", "invalid_chars", "=", "None", ",", "strip_whitespace", "=", "True", ",", "join", "=", "False", ",", "na", "=", "rex", ".", "nones", ")", ":", "# All 9 kwargs have persistent default values stored as attributes of the funcion instance", "if", "max_length", "is", "None", ":", "max_length", "=", "normalize_serial_number", ".", "max_length", "else", ":", "normalize_serial_number", ".", "max_length", "=", "max_length", "if", "left_fill", "is", "None", ":", "left_fill", "=", "normalize_serial_number", ".", "left_fill", "else", ":", "normalize_serial_number", ".", "left_fill", "=", "left_fill", "if", "right_fill", "is", "None", ":", "right_fill", "=", "normalize_serial_number", ".", "right_fill", "else", ":", "normalize_serial_number", ".", "right_fill", "=", "right_fill", "if", "blank", "is", "None", ":", "blank", "=", "normalize_serial_number", ".", "blank", "else", ":", "normalize_serial_number", ".", "blank", "=", "blank", "if", "valid_chars", "is", "None", ":", "valid_chars", "=", "normalize_serial_number", ".", "valid_chars", "else", ":", "normalize_serial_number", ".", "valid_chars", "=", "valid_chars", "if", "invalid_chars", "is", "None", ":", "invalid_chars", "=", "normalize_serial_number", ".", "invalid_chars", "else", ":", "normalize_serial_number", ".", "invalid_chars", "=", "invalid_chars", "if", "strip_whitespace", "is", "None", ":", "strip_whitespace", "=", "normalize_serial_number", ".", "strip_whitespace", "else", ":", "normalize_serial_number", ".", "strip_whitespace", "=", "strip_whitespace", "if", "join", "is", "None", ":", "join", "=", "normalize_serial_number", ".", "join", "else", ":", "normalize_serial_number", ".", "join", "=", "join", "if", "na", "is", "None", ":", "na", "=", "normalize_serial_number", ".", "na", "else", ":", "normalize_serial_number", ".", "na", "=", "na", "if", "invalid_chars", "is", "None", ":", "invalid_chars", "=", "(", "c", "for", "c", "in", "charlist", ".", "ascii_all", "if", "c", "not", "in", "valid_chars", ")", "invalid_chars", "=", "''", ".", "join", "(", "invalid_chars", ")", "sn", "=", "str", "(", "sn", ")", ".", "strip", "(", "invalid_chars", ")", "if", "strip_whitespace", ":", "sn", "=", "sn", ".", "strip", "(", ")", "if", "invalid_chars", ":", "if", "join", ":", "sn", "=", "sn", ".", "translate", "(", "dict", "(", "zip", "(", "invalid_chars", ",", "[", "''", "]", "*", "len", "(", "invalid_chars", ")", ")", ")", ")", "else", ":", "sn", "=", "multisplit", "(", "sn", ",", "invalid_chars", ")", "[", "-", "1", "]", "sn", "=", "sn", "[", "-", "max_length", ":", "]", "if", "strip_whitespace", ":", "sn", "=", "sn", ".", "strip", "(", ")", "if", "na", ":", "if", "isinstance", "(", "na", ",", "(", "tuple", ",", "set", ",", "dict", ",", "list", ")", ")", "and", "sn", "in", "na", ":", "sn", "=", "''", "elif", "na", ".", "match", "(", "sn", ")", ":", "sn", "=", "''", "if", "not", "sn", "and", "not", "(", "blank", "is", "False", ")", ":", "return", "blank", "if", "left_fill", ":", "sn", "=", "left_fill", "*", "int", "(", "max_length", "-", "len", "(", "sn", ")", "/", "len", "(", "left_fill", ")", ")", "+", "sn", "if", "right_fill", ":", "sn", "=", "sn", "+", "right_fill", "*", "(", "max_length", "-", "len", "(", "sn", ")", "/", "len", "(", "right_fill", ")", ")", "return", 
"sn" ]
r"""Make a string compatible with typical serial number requirements # Default configuration strips internal and external whitespaces and retains only the last 10 characters >>> normalize_serial_number('1C 234567890 ') '0234567890' >>> normalize_serial_number('1C 234567890 ', max_length=20) '000000001C 234567890' >>> normalize_serial_number('Unknown', blank=None, left_fill=str()) '' >>> normalize_serial_number('N/A', blank='', left_fill=str()) 'A' >>> normalize_serial_number('1C 234567890 ', max_length=20, left_fill='') '1C 234567890' Notice how the max_length setting (20) carries over from the previous test! >>> len(normalize_serial_number('Unknown', blank=False)) 20 >>> normalize_serial_number('Unknown', blank=False) '00000000000000000000' >>> normalize_serial_number(' \t1C\t-\t234567890 \x00\x7f', max_length=14, left_fill='0', ... valid_chars='0123456789ABC', invalid_chars=None, join=True) '1C\t-\t234567890' Notice how the max_length setting carries over from the previous test! >>> len(normalize_serial_number('Unknown', blank=False)) 14 Restore the default max_length setting >>> len(normalize_serial_number('Unknown', blank=False, max_length=10)) 10 >>> normalize_serial_number('NO SERIAL', blank='--=--', left_fill='') # doctest: +NORMALIZE_WHITESPACE 'NO SERIAL' >>> normalize_serial_number('NO SERIAL', blank='', left_fill='') # doctest: +NORMALIZE_WHITESPACE 'NO SERIAL' >>> normalize_serial_number('1C 234567890 ', valid_chars='0123456789') '0234567890'
[ "r", "Make", "a", "string", "compatible", "with", "typical", "serial", "number", "requirements" ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L2064-L2173
train
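Note on the padding step in the record above: the tokenized source computes left_fill * int(max_length - len(sn) / len(left_fill)) + sn, where the division binds tighter than the subtraction, so the repeat count is only correct when the fill string is a single character. A minimal sketch of the presumably intended arithmetic with explicit parentheses (the function name and defaults here are illustrative, not part of the package):

def pad_serial(sn, max_length=10, left_fill='0', right_fill=''):
    # repeat the fill enough times to reach max_length, then prepend/append
    if left_fill:
        sn = left_fill * int((max_length - len(sn)) / len(left_fill)) + sn
    if right_fill:
        sn = sn + right_fill * int((max_length - len(sn)) / len(right_fill))
    return sn

assert pad_serial('234567890') == '0234567890'   # matches the first doctest above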
totalgood/pugnlp
src/pugnlp/util.py
strip_HTML
def strip_HTML(s): """Simple, clumsy, slow HTML tag stripper""" result = '' total = 0 for c in s: if c == '<': total = 1 elif c == '>': total = 0 result += ' ' elif total == 0: result += c return result
python
def strip_HTML(s): """Simple, clumsy, slow HTML tag stripper""" result = '' total = 0 for c in s: if c == '<': total = 1 elif c == '>': total = 0 result += ' ' elif total == 0: result += c return result
[ "def", "strip_HTML", "(", "s", ")", ":", "result", "=", "''", "total", "=", "0", "for", "c", "in", "s", ":", "if", "c", "==", "'<'", ":", "total", "=", "1", "elif", "c", "==", "'>'", ":", "total", "=", "0", "result", "+=", "' '", "elif", "total", "==", "0", ":", "result", "+=", "c", "return", "result" ]
Simple, clumsy, slow HTML tag stripper
[ "Simple", "clumsy", "slow", "HTML", "tag", "stripper" ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L2256-L2268
train
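A quick illustration of the flag-based loop above (a local re-implementation for clarity; the original is importable from pugnlp.util): characters between '<' and '>' are dropped, and each closing '>' leaves a single space behind, so tags separate words but nothing inside a tag survives.

def strip_html_sketch(s):
    result, in_tag = '', False
    for c in s:
        if c == '<':
            in_tag = True
        elif c == '>':
            in_tag = False
            result += ' '       # every closing '>' becomes one space
        elif not in_tag:
            result += c
    return result

assert strip_html_sketch('<p>Hello <b>World</b></p>') == ' Hello  World  '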
totalgood/pugnlp
src/pugnlp/util.py
tabulate
def tabulate(lol, headers, eol='\n'): """Use the pypi tabulate package instead!""" yield '| %s |' % ' | '.join(headers) + eol yield '| %s:|' % ':| '.join(['-' * len(w) for w in headers]) + eol for row in lol: yield '| %s |' % ' | '.join(str(c) for c in row) + eol
python
def tabulate(lol, headers, eol='\n'): """Use the pypi tabulate package instead!""" yield '| %s |' % ' | '.join(headers) + eol yield '| %s:|' % ':| '.join(['-' * len(w) for w in headers]) + eol for row in lol: yield '| %s |' % ' | '.join(str(c) for c in row) + eol
[ "def", "tabulate", "(", "lol", ",", "headers", ",", "eol", "=", "'\\n'", ")", ":", "yield", "'| %s |'", "%", "' | '", ".", "join", "(", "headers", ")", "+", "eol", "yield", "'| %s:|'", "%", "':| '", ".", "join", "(", "[", "'-'", "*", "len", "(", "w", ")", "for", "w", "in", "headers", "]", ")", "+", "eol", "for", "row", "in", "lol", ":", "yield", "'| %s |'", "%", "' | '", ".", "join", "(", "str", "(", "c", ")", "for", "c", "in", "row", ")", "+", "eol" ]
Use the pypi tabulate package instead!
[ "Use", "the", "pypi", "tabulate", "package", "instead!" ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L2396-L2401
train
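Usage sketch for the generator above (output hedged, derived by tracing the format strings). Note that the separator row joins the dash runs with ':| ', so the alignment colon lands at the end of each dash run, and every yielded line already carries the eol terminator:

from pugnlp.util import tabulate   # path per the record above

for line in tabulate([['alice', 3], ['bob', 12]], headers=['name', 'count']):
    print(line, end='')
# | name | count |
# | ----:| -----:|
# | alice | 3 |
# | bob | 12 |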
totalgood/pugnlp
src/pugnlp/util.py
listify
def listify(values, N=1, delim=None): """Return an N-length list, with elements values, extrapolating as necessary. >>> listify("don't split into characters") ["don't split into characters"] >>> listify("len = 3", 3) ['len = 3', 'len = 3', 'len = 3'] >>> listify("But split on a delimeter, if requested.", delim=',') ['But split on a delimeter', ' if requested.'] >>> listify(["obj 1", "obj 2", "len = 4"], N=4) ['obj 1', 'obj 2', 'len = 4', 'len = 4'] >>> listify(iter("len=7"), N=7) ['l', 'e', 'n', '=', '7', '7', '7'] >>> listify(iter("len=5")) ['l', 'e', 'n', '=', '5'] >>> listify(None, 3) [[], [], []] >>> listify([None],3) [None, None, None] >>> listify([], 3) [[], [], []] >>> listify('', 2) ['', ''] >>> listify(0) [0] >>> listify(False, 2) [False, False] """ ans = [] if values is None else values # convert non-string non-list iterables into a list if hasattr(ans, '__iter__') and not isinstance(ans, basestring): ans = list(ans) else: # split the string (if possible) if isinstance(delim, basestring) and isinstance(ans, basestring): try: ans = ans.split(delim) except (IndexError, ValueError, AttributeError, TypeError): ans = [ans] else: ans = [ans] # pad the end of the list if a length has been specified if len(ans): if len(ans) < N and N > 1: ans += [ans[-1]] * (N - len(ans)) else: if N > 1: ans = [[]] * N return ans
python
def listify(values, N=1, delim=None): """Return an N-length list, with elements values, extrapolating as necessary. >>> listify("don't split into characters") ["don't split into characters"] >>> listify("len = 3", 3) ['len = 3', 'len = 3', 'len = 3'] >>> listify("But split on a delimeter, if requested.", delim=',') ['But split on a delimeter', ' if requested.'] >>> listify(["obj 1", "obj 2", "len = 4"], N=4) ['obj 1', 'obj 2', 'len = 4', 'len = 4'] >>> listify(iter("len=7"), N=7) ['l', 'e', 'n', '=', '7', '7', '7'] >>> listify(iter("len=5")) ['l', 'e', 'n', '=', '5'] >>> listify(None, 3) [[], [], []] >>> listify([None],3) [None, None, None] >>> listify([], 3) [[], [], []] >>> listify('', 2) ['', ''] >>> listify(0) [0] >>> listify(False, 2) [False, False] """ ans = [] if values is None else values # convert non-string non-list iterables into a list if hasattr(ans, '__iter__') and not isinstance(ans, basestring): ans = list(ans) else: # split the string (if possible) if isinstance(delim, basestring) and isinstance(ans, basestring): try: ans = ans.split(delim) except (IndexError, ValueError, AttributeError, TypeError): ans = [ans] else: ans = [ans] # pad the end of the list if a length has been specified if len(ans): if len(ans) < N and N > 1: ans += [ans[-1]] * (N - len(ans)) else: if N > 1: ans = [[]] * N return ans
[ "def", "listify", "(", "values", ",", "N", "=", "1", ",", "delim", "=", "None", ")", ":", "ans", "=", "[", "]", "if", "values", "is", "None", "else", "values", "# convert non-string non-list iterables into a list", "if", "hasattr", "(", "ans", ",", "'__iter__'", ")", "and", "not", "isinstance", "(", "ans", ",", "basestring", ")", ":", "ans", "=", "list", "(", "ans", ")", "else", ":", "# split the string (if possible)", "if", "isinstance", "(", "delim", ",", "basestring", ")", "and", "isinstance", "(", "ans", ",", "basestring", ")", ":", "try", ":", "ans", "=", "ans", ".", "split", "(", "delim", ")", "except", "(", "IndexError", ",", "ValueError", ",", "AttributeError", ",", "TypeError", ")", ":", "ans", "=", "[", "ans", "]", "else", ":", "ans", "=", "[", "ans", "]", "# pad the end of the list if a length has been specified", "if", "len", "(", "ans", ")", ":", "if", "len", "(", "ans", ")", "<", "N", "and", "N", ">", "1", ":", "ans", "+=", "[", "ans", "[", "-", "1", "]", "]", "*", "(", "N", "-", "len", "(", "ans", ")", ")", "else", ":", "if", "N", ">", "1", ":", "ans", "=", "[", "[", "]", "]", "*", "N", "return", "ans" ]
Return an N-length list, with elements values, extrapolating as necessary. >>> listify("don't split into characters") ["don't split into characters"] >>> listify("len = 3", 3) ['len = 3', 'len = 3', 'len = 3'] >>> listify("But split on a delimeter, if requested.", delim=',') ['But split on a delimeter', ' if requested.'] >>> listify(["obj 1", "obj 2", "len = 4"], N=4) ['obj 1', 'obj 2', 'len = 4', 'len = 4'] >>> listify(iter("len=7"), N=7) ['l', 'e', 'n', '=', '7', '7', '7'] >>> listify(iter("len=5")) ['l', 'e', 'n', '=', '5'] >>> listify(None, 3) [[], [], []] >>> listify([None],3) [None, None, None] >>> listify([], 3) [[], [], []] >>> listify('', 2) ['', ''] >>> listify(0) [0] >>> listify(False, 2) [False, False]
[ "Return", "an", "N", "-", "length", "list", "with", "elements", "values", "extrapolating", "as", "necessary", "." ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L2464-L2515
train
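Three behaviors from the doctests above, restated as assertions (assuming pugnlp.util imports cleanly): strings split only when a delimiter is given, short lists pad by repeating their last element, and None falls back to empty-list placeholders.

from pugnlp.util import listify

assert listify('a,b', delim=',') == ['a', 'b']
assert listify([1, 2], N=4) == [1, 2, 2, 2]    # last element repeated to length N
assert listify(None, 2) == [[], []]            # None yields empty-list placeholders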
totalgood/pugnlp
src/pugnlp/util.py
unlistify
def unlistify(n, depth=1, typ=list, get=None): """Return the desired element in a list ignoring the rest. >>> unlistify([1,2,3]) 1 >>> unlistify([1,[4, 5, 6],3], get=1) [4, 5, 6] >>> unlistify([1,[4, 5, 6],3], depth=2, get=1) 5 >>> unlistify([1,(4, 5, 6),3], depth=2, get=1) (4, 5, 6) >>> unlistify([1,2,(4, 5, 6)], depth=2, get=2) (4, 5, 6) >>> unlistify([1,2,(4, 5, 6)], depth=2, typ=(list, tuple), get=2) 6 """ i = 0 if depth is None: depth = 1 index_desired = get or 0 while i < depth and isinstance(n, typ): if len(n): if len(n) > index_desired: n = n[index_desired] i += 1 else: return n return n
python
def unlistify(n, depth=1, typ=list, get=None): """Return the desired element in a list ignoring the rest. >>> unlistify([1,2,3]) 1 >>> unlistify([1,[4, 5, 6],3], get=1) [4, 5, 6] >>> unlistify([1,[4, 5, 6],3], depth=2, get=1) 5 >>> unlistify([1,(4, 5, 6),3], depth=2, get=1) (4, 5, 6) >>> unlistify([1,2,(4, 5, 6)], depth=2, get=2) (4, 5, 6) >>> unlistify([1,2,(4, 5, 6)], depth=2, typ=(list, tuple), get=2) 6 """ i = 0 if depth is None: depth = 1 index_desired = get or 0 while i < depth and isinstance(n, typ): if len(n): if len(n) > index_desired: n = n[index_desired] i += 1 else: return n return n
[ "def", "unlistify", "(", "n", ",", "depth", "=", "1", ",", "typ", "=", "list", ",", "get", "=", "None", ")", ":", "i", "=", "0", "if", "depth", "is", "None", ":", "depth", "=", "1", "index_desired", "=", "get", "or", "0", "while", "i", "<", "depth", "and", "isinstance", "(", "n", ",", "typ", ")", ":", "if", "len", "(", "n", ")", ":", "if", "len", "(", "n", ")", ">", "index_desired", ":", "n", "=", "n", "[", "index_desired", "]", "i", "+=", "1", "else", ":", "return", "n", "return", "n" ]
Return the desired element in a list ignoring the rest. >>> unlistify([1,2,3]) 1 >>> unlistify([1,[4, 5, 6],3], get=1) [4, 5, 6] >>> unlistify([1,[4, 5, 6],3], depth=2, get=1) 5 >>> unlistify([1,(4, 5, 6),3], depth=2, get=1) (4, 5, 6) >>> unlistify([1,2,(4, 5, 6)], depth=2, get=2) (4, 5, 6) >>> unlistify([1,2,(4, 5, 6)], depth=2, typ=(list, tuple), get=2) 6
[ "Return", "the", "desired", "element", "in", "a", "list", "ignoring", "the", "rest", "." ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L2522-L2549
train
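The interplay of depth, typ, and get above restated compactly (assertions mirror the doctests): depth bounds how many containers are unwrapped, typ limits which container types count, and get picks the index at each level.

from pugnlp.util import unlistify

assert unlistify([1, 2, 3]) == 1                                         # one level, index 0
assert unlistify([1, [4, 5, 6], 3], depth=2, get=1) == 5                 # two levels deep
assert unlistify([1, 2, (4, 5, 6)], depth=2, typ=(list, tuple), get=2) == 6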
totalgood/pugnlp
src/pugnlp/util.py
strip_keys
def strip_keys(d, nones=False, depth=0): r"""Strip whitespace from all dictionary keys, to the depth indicated >>> strip_keys({' a': ' a', ' b\t c ': {'d e ': 'd e '}}) == {'a': ' a', 'b\t c': {'d e ': 'd e '}} True >>> strip_keys({' a': ' a', ' b\t c ': {'d e ': 'd e '}}, depth=100) == {'a': ' a', 'b\t c': {'d e': 'd e '}} True """ ans = type(d)((str(k).strip(), v) for (k, v) in viewitems(OrderedDict(d)) if (not nones or (str(k).strip() and str(k).strip() != 'None'))) if int(depth) < 1: return ans if int(depth) > strip_keys.MAX_DEPTH: warnings.warn(RuntimeWarning("Maximum recursion depth allowance (%r) exceeded." % strip_keys.MAX_DEPTH)) for k, v in viewitems(ans): if isinstance(v, Mapping): ans[k] = strip_keys(v, nones=nones, depth=int(depth) - 1) return ans
python
def strip_keys(d, nones=False, depth=0): r"""Strip whitespace from all dictionary keys, to the depth indicated >>> strip_keys({' a': ' a', ' b\t c ': {'d e ': 'd e '}}) == {'a': ' a', 'b\t c': {'d e ': 'd e '}} True >>> strip_keys({' a': ' a', ' b\t c ': {'d e ': 'd e '}}, depth=100) == {'a': ' a', 'b\t c': {'d e': 'd e '}} True """ ans = type(d)((str(k).strip(), v) for (k, v) in viewitems(OrderedDict(d)) if (not nones or (str(k).strip() and str(k).strip() != 'None'))) if int(depth) < 1: return ans if int(depth) > strip_keys.MAX_DEPTH: warnings.warn(RuntimeWarning("Maximum recursion depth allowance (%r) exceeded." % strip_keys.MAX_DEPTH)) for k, v in viewitems(ans): if isinstance(v, Mapping): ans[k] = strip_keys(v, nones=nones, depth=int(depth) - 1) return ans
[ "def", "strip_keys", "(", "d", ",", "nones", "=", "False", ",", "depth", "=", "0", ")", ":", "ans", "=", "type", "(", "d", ")", "(", "(", "str", "(", "k", ")", ".", "strip", "(", ")", ",", "v", ")", "for", "(", "k", ",", "v", ")", "in", "viewitems", "(", "OrderedDict", "(", "d", ")", ")", "if", "(", "not", "nones", "or", "(", "str", "(", "k", ")", ".", "strip", "(", ")", "and", "str", "(", "k", ")", ".", "strip", "(", ")", "!=", "'None'", ")", ")", ")", "if", "int", "(", "depth", ")", "<", "1", ":", "return", "ans", "if", "int", "(", "depth", ")", ">", "strip_keys", ".", "MAX_DEPTH", ":", "warnings", ".", "warn", "(", "RuntimeWarning", "(", "\"Maximum recursion depth allowance (%r) exceeded.\"", "%", "strip_keys", ".", "MAX_DEPTH", ")", ")", "for", "k", ",", "v", "in", "viewitems", "(", "ans", ")", ":", "if", "isinstance", "(", "v", ",", "Mapping", ")", ":", "ans", "[", "k", "]", "=", "strip_keys", "(", "v", ",", "nones", "=", "nones", ",", "depth", "=", "int", "(", "depth", ")", "-", "1", ")", "return", "ans" ]
r"""Strip whitespace from all dictionary keys, to the depth indicated >>> strip_keys({' a': ' a', ' b\t c ': {'d e ': 'd e '}}) == {'a': ' a', 'b\t c': {'d e ': 'd e '}} True >>> strip_keys({' a': ' a', ' b\t c ': {'d e ': 'd e '}}, depth=100) == {'a': ' a', 'b\t c': {'d e': 'd e '}} True
[ "r", "Strip", "whitespace", "from", "all", "dictionary", "keys", "to", "the", "depth", "indicated" ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L2568-L2585
train
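Note that the function reads strip_keys.MAX_DEPTH, a function attribute that must be assigned elsewhere in util.py before deep recursion is requested. A behavior sketch under that assumption:

from pugnlp.util import strip_keys

d = {' a ': 1, ' b ': {' c ': 2}}
assert strip_keys(d) == {'a': 1, 'b': {' c ': 2}}         # depth=0: top level only
assert strip_keys(d, depth=2) == {'a': 1, 'b': {'c': 2}}  # recurses into nested mappings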
totalgood/pugnlp
src/pugnlp/util.py
get_table_from_csv
def get_table_from_csv(filename='ssg_report_aarons_returns.csv', delimiter=',', dos=False): """Dictionary of sequences from CSV file""" table = [] with open(filename, 'rb') as f: reader = csv.reader(f, dialect='excel', delimiter=delimiter) for row in reader: table += [row] if not dos: return table return dos_from_table(table)
python
def get_table_from_csv(filename='ssg_report_aarons_returns.csv', delimiter=',', dos=False): """Dictionary of sequences from CSV file""" table = [] with open(filename, 'rb') as f: reader = csv.reader(f, dialect='excel', delimiter=delimiter) for row in reader: table += [row] if not dos: return table return dos_from_table(table)
[ "def", "get_table_from_csv", "(", "filename", "=", "'ssg_report_aarons_returns.csv'", ",", "delimiter", "=", "','", ",", "dos", "=", "False", ")", ":", "table", "=", "[", "]", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "f", ":", "reader", "=", "csv", ".", "reader", "(", "f", ",", "dialect", "=", "'excel'", ",", "delimiter", "=", "delimiter", ")", "for", "row", "in", "reader", ":", "table", "+=", "[", "row", "]", "if", "not", "dos", ":", "return", "table", "return", "dos_from_table", "(", "table", ")" ]
Dictionary of sequences from CSV file
[ "Dictionary", "of", "sequences", "from", "CSV", "file" ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L2596-L2605
train
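Opening the file in 'rb' mode suits Python 2's csv module; under Python 3, csv.reader requires a text-mode file, so the call above fails. A hedged Python 3 variant of the same loop (helper name is illustrative):

import csv

def get_table_from_csv_py3(filename, delimiter=','):
    # text mode with newline='' is what the csv module documents for Python 3
    with open(filename, 'r', newline='') as f:
        return list(csv.reader(f, dialect='excel', delimiter=delimiter))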
totalgood/pugnlp
src/pugnlp/util.py
shorten
def shorten(s, max_len=16): """Attempt to shorten a phrase by deleting words at the end of the phrase >>> shorten('Hello World!') 'Hello World' >>> shorten("Hello World! I'll talk your ear off!", 15) 'Hello World' """ short = s words = [abbreviate(word) for word in get_words(s)] for i in range(len(words), 0, -1): short = ' '.join(words[:i]) if len(short) <= max_len: break return short[:max_len]
python
def shorten(s, max_len=16): """Attempt to shorten a phrase by deleting words at the end of the phrase >>> shorten('Hello World!') 'Hello World' >>> shorten("Hello World! I'll talk your ear off!", 15) 'Hello World' """ short = s words = [abbreviate(word) for word in get_words(s)] for i in range(len(words), 0, -1): short = ' '.join(words[:i]) if len(short) <= max_len: break return short[:max_len]
[ "def", "shorten", "(", "s", ",", "max_len", "=", "16", ")", ":", "short", "=", "s", "words", "=", "[", "abbreviate", "(", "word", ")", "for", "word", "in", "get_words", "(", "s", ")", "]", "for", "i", "in", "range", "(", "len", "(", "words", ")", ",", "0", ",", "-", "1", ")", ":", "short", "=", "' '", ".", "join", "(", "words", "[", ":", "i", "]", ")", "if", "len", "(", "short", ")", "<=", "max_len", ":", "break", "return", "short", "[", ":", "max_len", "]" ]
Attempt to shorten a phrase by deleting words at the end of the phrase >>> shorten('Hello World!') 'Hello World' >>> shorten("Hello World! I'll talk your ear off!", 15) 'Hello World'
[ "Attempt", "to", "shorten", "a", "phrase", "by", "deleting", "words", "at", "the", "end", "of", "the", "phrase" ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L2627-L2641
train
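shorten depends on two sibling helpers in util.py, abbreviate() and get_words(); the loop drops whole (abbreviated) words from the end until the phrase fits, then hard-truncates as a last resort. The doctests restated (assuming the module imports cleanly):

from pugnlp.util import shorten

assert shorten('Hello World!') == 'Hello World'
assert shorten("Hello World! I'll talk your ear off!", 15) == 'Hello World'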
totalgood/pugnlp
src/pugnlp/util.py
truncate
def truncate(s, max_len=20, ellipsis='...'): r"""Return string at most `max_len` characters or sequence elements appended with the `ellipsis` characters >>> truncate(OrderedDict(zip(list('ABCDEFGH'), range(8))), 1) "{'A': 0..." >>> truncate(list(range(5)), 3) '[0, 1, 2...' >>> truncate(np.arange(5), 3) '[0, 1, 2...' >>> truncate('Too verbose for its own good.', 11) 'Too verbose...' """ if s is None: return None elif isinstance(s, basestring): return s[:min(len(s), max_len)] + ellipsis if len(s) > max_len else '' elif isinstance(s, Mapping): truncated_str = str(dict(islice(viewitems(s), max_len))) else: truncated_str = str(list(islice(s, max_len))) return truncated_str[:-1] + '...' if len(s) > max_len else truncated_str
python
def truncate(s, max_len=20, ellipsis='...'): r"""Return string at most `max_len` characters or sequence elements appended with the `ellipsis` characters >>> truncate(OrderedDict(zip(list('ABCDEFGH'), range(8))), 1) "{'A': 0..." >>> truncate(list(range(5)), 3) '[0, 1, 2...' >>> truncate(np.arange(5), 3) '[0, 1, 2...' >>> truncate('Too verbose for its own good.', 11) 'Too verbose...' """ if s is None: return None elif isinstance(s, basestring): return s[:min(len(s), max_len)] + ellipsis if len(s) > max_len else '' elif isinstance(s, Mapping): truncated_str = str(dict(islice(viewitems(s), max_len))) else: truncated_str = str(list(islice(s, max_len))) return truncated_str[:-1] + '...' if len(s) > max_len else truncated_str
[ "def", "truncate", "(", "s", ",", "max_len", "=", "20", ",", "ellipsis", "=", "'...'", ")", ":", "if", "s", "is", "None", ":", "return", "None", "elif", "isinstance", "(", "s", ",", "basestring", ")", ":", "return", "s", "[", ":", "min", "(", "len", "(", "s", ")", ",", "max_len", ")", "]", "+", "ellipsis", "if", "len", "(", "s", ")", ">", "max_len", "else", "''", "elif", "isinstance", "(", "s", ",", "Mapping", ")", ":", "truncated_str", "=", "str", "(", "dict", "(", "islice", "(", "viewitems", "(", "s", ")", ",", "max_len", ")", ")", ")", "else", ":", "truncated_str", "=", "str", "(", "list", "(", "islice", "(", "s", ",", "max_len", ")", ")", ")", "return", "truncated_str", "[", ":", "-", "1", "]", "+", "'...'", "if", "len", "(", "s", ")", ">", "max_len", "else", "truncated_str" ]
r"""Return string at most `max_len` characters or sequence elments appended with the `ellipsis` characters >>> truncate(OrderedDict(zip(list('ABCDEFGH'), range(8))), 1) "{'A': 0..." >>> truncate(list(range(5)), 3) '[0, 1, 2...' >>> truncate(np.arange(5), 3) '[0, 1, 2...' >>> truncate('Too verbose for its own good.', 11) 'Too verbose...'
[ "r", "Return", "string", "at", "most", "max_len", "characters", "or", "sequence", "elments", "appended", "with", "the", "ellipsis", "characters" ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L2656-L2676
train
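One subtlety worth flagging: in the string branch, a value that already fits within max_len returns '' rather than the string itself, which looks unintentional; sequences and mappings return their (possibly complete) repr instead. Restating the behavior, with the last assertion exercising the surprising case:

from pugnlp.util import truncate

assert truncate('Too verbose for its own good.', 11) == 'Too verbose...'
assert truncate(list(range(5)), 3) == '[0, 1, 2...'
assert truncate('short', 20) == ''   # a string that fits collapses to '' (likely a bug)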
totalgood/pugnlp
src/pugnlp/util.py
slash_product
def slash_product(string_or_seq, slash='/', space=' '): """Return a list of all possible meanings of a phrase containing slashes TODO: - Code is not in standard Sedgewick recursion form - Simplify by removing one of the recursive calls? - Simplify by using a list comprehension? # doctest: +NORMALIZE_WHITESPACE >>> slash_product("The challenging/confusing interview didn't end with success/offer") ["The challenging interview didn't end with success", "The challenging interview didn't end with offer", "The confusing interview didn't end with success", "The confusing interview didn't end with offer"] >>> slash_product('I say goodbye/hello cruel/fun world.') # doctest: +NORMALIZE_WHITESPACE ['I say goodbye cruel world.', 'I say goodbye fun world.', 'I say hello cruel world.', 'I say hello fun world.'] >>> slash_product('I say goodbye/hello/bonjour cruelness/fun/world') # doctest: +NORMALIZE_WHITESPACE ['I say goodbye cruelness', 'I say goodbye fun', 'I say goodbye world', 'I say hello cruelness', 'I say hello fun', 'I say hello world', 'I say bonjour cruelness', 'I say bonjour fun', 'I say bonjour world'] """ # Terminating case is a sequence of strings without any slashes if not isinstance(string_or_seq, basestring): # If it's not a string and has no slashes, we're done if not any(slash in s for s in string_or_seq): return list(string_or_seq) ans = [] for s in string_or_seq: # slash_product of a string will always return a flat list ans += slash_product(s) return slash_product(ans) # Another terminating case is a single string without any slashes if slash not in string_or_seq: return [string_or_seq] # The third case is a string with some slashes in it i = string_or_seq.index(slash) head, tail = string_or_seq[:i].split(space), string_or_seq[i + 1:].split(space) alternatives = head[-1], tail[0] head, tail = space.join(head[:-1]), space.join(tail[1:]) return slash_product([space.join([head, word, tail]).strip(space) for word in alternatives])
python
def slash_product(string_or_seq, slash='/', space=' '): """Return a list of all possible meanings of a phrase containing slashes TODO: - Code is not in standard Sedgewick recursion form - Simplify by removing one of the recursive calls? - Simplify by using a list comprehension? # doctest: +NORMALIZE_WHITESPACE >>> slash_product("The challenging/confusing interview didn't end with success/offer") ["The challenging interview didn't end with success", "The challenging interview didn't end with offer", "The confusing interview didn't end with success", "The confusing interview didn't end with offer"] >>> slash_product('I say goodbye/hello cruel/fun world.') # doctest: +NORMALIZE_WHITESPACE ['I say goodbye cruel world.', 'I say goodbye fun world.', 'I say hello cruel world.', 'I say hello fun world.'] >>> slash_product('I say goodbye/hello/bonjour cruelness/fun/world') # doctest: +NORMALIZE_WHITESPACE ['I say goodbye cruelness', 'I say goodbye fun', 'I say goodbye world', 'I say hello cruelness', 'I say hello fun', 'I say hello world', 'I say bonjour cruelness', 'I say bonjour fun', 'I say bonjour world'] """ # Terminating case is a sequence of strings without any slashes if not isinstance(string_or_seq, basestring): # If it's not a string and has no slashes, we're done if not any(slash in s for s in string_or_seq): return list(string_or_seq) ans = [] for s in string_or_seq: # slash_product of a string will always return a flat list ans += slash_product(s) return slash_product(ans) # Another terminating case is a single string without any slashes if slash not in string_or_seq: return [string_or_seq] # The third case is a string with some slashes in it i = string_or_seq.index(slash) head, tail = string_or_seq[:i].split(space), string_or_seq[i + 1:].split(space) alternatives = head[-1], tail[0] head, tail = space.join(head[:-1]), space.join(tail[1:]) return slash_product([space.join([head, word, tail]).strip(space) for word in alternatives])
[ "def", "slash_product", "(", "string_or_seq", ",", "slash", "=", "'/'", ",", "space", "=", "' '", ")", ":", "# Terminating case is a sequence of strings without any slashes", "if", "not", "isinstance", "(", "string_or_seq", ",", "basestring", ")", ":", "# If it's not a string and has no slashes, we're done", "if", "not", "any", "(", "slash", "in", "s", "for", "s", "in", "string_or_seq", ")", ":", "return", "list", "(", "string_or_seq", ")", "ans", "=", "[", "]", "for", "s", "in", "string_or_seq", ":", "# slash_product of a string will always return a flat list", "ans", "+=", "slash_product", "(", "s", ")", "return", "slash_product", "(", "ans", ")", "# Another terminating case is a single string without any slashes", "if", "slash", "not", "in", "string_or_seq", ":", "return", "[", "string_or_seq", "]", "# The third case is a string with some slashes in it", "i", "=", "string_or_seq", ".", "index", "(", "slash", ")", "head", ",", "tail", "=", "string_or_seq", "[", ":", "i", "]", ".", "split", "(", "space", ")", ",", "string_or_seq", "[", "i", "+", "1", ":", "]", ".", "split", "(", "space", ")", "alternatives", "=", "head", "[", "-", "1", "]", ",", "tail", "[", "0", "]", "head", ",", "tail", "=", "space", ".", "join", "(", "head", "[", ":", "-", "1", "]", ")", ",", "space", ".", "join", "(", "tail", "[", "1", ":", "]", ")", "return", "slash_product", "(", "[", "space", ".", "join", "(", "[", "head", ",", "word", ",", "tail", "]", ")", ".", "strip", "(", "space", ")", "for", "word", "in", "alternatives", "]", ")" ]
Return a list of all possible meanings of a phrase containing slashes TODO: - Code is not in standard Sedgewick recursion form - Simplify by removing one of the recursive calls? - Simplify by using a list comprehension? # doctest: +NORMALIZE_WHITESPACE >>> slash_product("The challenging/confusing interview didn't end with success/offer") ["The challenging interview didn't end with success", "The challenging interview didn't end with offer", "The confusing interview didn't end with success", "The confusing interview didn't end with offer"] >>> slash_product('I say goodbye/hello cruel/fun world.') # doctest: +NORMALIZE_WHITESPACE ['I say goodbye cruel world.', 'I say goodbye fun world.', 'I say hello cruel world.', 'I say hello fun world.'] >>> slash_product('I say goodbye/hello/bonjour cruelness/fun/world') # doctest: +NORMALIZE_WHITESPACE ['I say goodbye cruelness', 'I say goodbye fun', 'I say goodbye world', 'I say hello cruelness', 'I say hello fun', 'I say hello world', 'I say bonjour cruelness', 'I say bonjour fun', 'I say bonjour world']
[ "Return", "a", "list", "of", "all", "possible", "meanings", "of", "a", "phrase", "containing", "slashes" ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L2916-L2964
train
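The recursion above splits on the first slash it finds, producing one branch per alternative word, and re-enters itself until no slashes remain, so the result is the cross product of all slash groups. One doctest restated as an assertion:

from pugnlp.util import slash_product

assert slash_product('I say goodbye/hello cruel/fun world.') == [
    'I say goodbye cruel world.', 'I say goodbye fun world.',
    'I say hello cruel world.', 'I say hello fun world.']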
DarkEnergySurvey/ugali
ugali/scratch/position_angle.py
create_header
def create_header(coord, radius, proj='ZEA', npix=30): """ Create a header for a new image """ gal = coord.name == 'galactic' values = [ ["NAXIS", 2, ], ["NAXIS1", npix, ], ["NAXIS2", npix, ], ["CTYPE1", 'GLON-%s'%proj if gal else 'RA---%s'%proj ], ["CTYPE2", 'GLAT-%s'%proj if gal else 'DEC--%s'%proj ], ["CRPIX1", npix/2. + 0.5, ], ["CRPIX2", npix/2. + 0.5, ], ["CRVAL1", coord.l.deg if gal else coord.ra.deg, ], ["CRVAL2", coord.b.deg if gal else coord.dec.deg, ], ["CDELT1", -3.*radius/npix, ], ["CDELT2", 3.*radius/npix, ], ] if not gal: values += [ ['RADECSYS','FK5'], ['EQUINOX',2000], ] cards = [pyfits.Card(*i) for i in values] header=pyfits.Header(cards=cards) return header
python
def create_header(coord, radius, proj='ZEA', npix=30): """ Create a header for a new image """ gal = coord.name == 'galactic' values = [ ["NAXIS", 2, ], ["NAXIS1", npix, ], ["NAXIS2", npix, ], ["CTYPE1", 'GLON-%s'%proj if gal else 'RA---%s'%proj ], ["CTYPE2", 'GLAT-%s'%proj if gal else 'DEC--%s'%proj ], ["CRPIX1", npix/2. + 0.5, ], ["CRPIX2", npix/2. + 0.5, ], ["CRVAL1", coord.l.deg if gal else coord.ra.deg, ], ["CRVAL2", coord.b.deg if gal else coord.dec.deg, ], ["CDELT1", -3.*radius/npix, ], ["CDELT2", 3.*radius/npix, ], ] if not gal: values += [ ['RADECSYS','FK5'], ['EQUINOX',2000], ] cards = [pyfits.Card(*i) for i in values] header=pyfits.Header(cards=cards) return header
[ "def", "create_header", "(", "coord", ",", "radius", ",", "proj", "=", "'ZEA'", ",", "npix", "=", "30", ")", ":", "gal", "=", "coord", ".", "name", "==", "'galactic'", "values", "=", "[", "[", "\"NAXIS\"", ",", "2", ",", "]", ",", "[", "\"NAXIS1\"", ",", "npix", ",", "]", ",", "[", "\"NAXIS2\"", ",", "npix", ",", "]", ",", "[", "\"CTYPE1\"", ",", "'GLON-%s'", "%", "proj", "if", "gal", "else", "'RA---%s'", "%", "proj", "]", ",", "[", "\"CTYPE2\"", ",", "'GLAT-%s'", "%", "proj", "if", "gal", "else", "'DEC--%s'", "%", "proj", "]", ",", "[", "\"CRPIX1\"", ",", "npix", "/", "2.", "+", "0.5", ",", "]", ",", "[", "\"CRPIX2\"", ",", "npix", "/", "2.", "+", "0.5", ",", "]", ",", "[", "\"CRVAL1\"", ",", "coord", ".", "l", ".", "deg", "if", "gal", "else", "coord", ".", "ra", ".", "deg", ",", "]", ",", "[", "\"CRVAL2\"", ",", "coord", ".", "b", ".", "deg", "if", "gal", "else", "coord", ".", "dec", ".", "deg", ",", "]", ",", "[", "\"CDELT1\"", ",", "-", "3.", "*", "radius", "/", "npix", ",", "]", ",", "[", "\"CDELT2\"", ",", "3.", "*", "radius", "/", "npix", ",", "]", ",", "]", "if", "not", "gal", ":", "values", "+=", "[", "[", "'RADECSYS'", ",", "'FK5'", "]", ",", "[", "'EQUINOX'", ",", "2000", "]", ",", "]", "cards", "=", "[", "pyfits", ".", "Card", "(", "*", "i", ")", "for", "i", "in", "values", "]", "header", "=", "pyfits", ".", "Header", "(", "cards", "=", "cards", ")", "return", "header" ]
Create a header for a new image
[ "Create", "a", "header", "a", "new", "image" ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/scratch/position_angle.py#L19-L50
train
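A hedged usage sketch: assuming coord behaves like an astropy SkyCoord (exposing .name plus .ra/.dec, or .l/.b for galactic frames), pyfits resolves to astropy.io.fits, and the scratch module is on the path, the function yields a minimal WCS header spanning roughly 3*radius degrees across npix pixels:

from astropy.coordinates import SkyCoord

coord = SkyCoord(ra=53.92, dec=-54.05, unit='deg')   # hypothetical target
hdr = create_header(coord, radius=0.5, proj='ZEA', npix=30)
print(hdr['CTYPE1'], hdr['CRVAL1'], hdr['CDELT1'])   # RA---ZEA 53.92 -0.05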
totalgood/pugnlp
src/pugnlp/penn_treebank_tokenizer.py
word_tokenize
def word_tokenize(text): """ Split string `text` into word tokens using the Penn Treebank rules """ for (regexp, replacement) in RULES1: text = sub(regexp, replacement, text) # add extra space to make things easier text = " " + text + " " for (regexp, replacement) in RULES2: text = sub(regexp, replacement, text) for regexp in CONTRACTIONS: text = sub(regexp, r"\1 \2 ", text) # split and return return text.split()
python
def word_tokenize(text): """ Split string `text` into word tokens using the Penn Treebank rules """ for (regexp, replacement) in RULES1: text = sub(regexp, replacement, text) # add extra space to make things easier text = " " + text + " " for (regexp, replacement) in RULES2: text = sub(regexp, replacement, text) for regexp in CONTRACTIONS: text = sub(regexp, r"\1 \2 ", text) # split and return return text.split()
[ "def", "word_tokenize", "(", "text", ")", ":", "for", "(", "regexp", ",", "replacement", ")", "in", "RULES1", ":", "text", "=", "sub", "(", "regexp", ",", "replacement", ",", "text", ")", "# add extra space to make things easier", "text", "=", "\" \"", "+", "text", "+", "\" \"", "for", "(", "regexp", ",", "replacement", ")", "in", "RULES2", ":", "text", "=", "sub", "(", "regexp", ",", "replacement", ",", "text", ")", "for", "regexp", "in", "CONTRACTIONS", ":", "text", "=", "sub", "(", "regexp", ",", "r\"\\1 \\2 \"", ",", "text", ")", "# split and return", "return", "text", ".", "split", "(", ")" ]
Split string `text` into word tokens using the Penn Treebank rules
[ "Split", "string", "text", "into", "word", "tokens", "using", "the", "Penn", "Treebank", "rules" ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/penn_treebank_tokenizer.py#L83-L96
train
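A usage sketch (expected output hedged from standard Penn Treebank conventions, since RULES1, RULES2, and CONTRACTIONS are defined earlier in the module): punctuation is split off and contractions are divided into two tokens.

from pugnlp.penn_treebank_tokenizer import word_tokenize

print(word_tokenize("I can't do that, Dave."))
# expected, per Penn Treebank conventions:
# ['I', 'ca', "n't", 'do', 'that', ',', 'Dave', '.']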
hootnot/postcode-api-wrapper
postcodepy/postcodepy.py
EndpointsMixin.get_postcodedata
def get_postcodedata(self, postcode, nr, addition="", **params): """get_postcodedata - fetch information for 'postcode'. Parameters ---------- postcode : string The full (dutch) postcode nr : int The housenumber addition : string (optional) the extension to a housenumber params : dict (optional) a list of parameters to send with the request. returns : a response dictionary """ endpoint = 'rest/addresses/%s/%s' % (postcode, nr) if addition: endpoint += '/' + addition retValue = self._API__request(endpoint, params=params) # then it should match the houseNumberAdditions if addition and addition.upper() not in \ [a.upper() for a in retValue['houseNumberAdditions']]: raise PostcodeError( "ERRHouseNumberAdditionInvalid", {"exceptionId": "ERRHouseNumberAdditionInvalid", "exception": "Invalid housenumber addition: '%s'" % retValue['houseNumberAddition'], "validHouseNumberAdditions": retValue['houseNumberAdditions']}) return retValue
python
def get_postcodedata(self, postcode, nr, addition="", **params): """get_postcodedata - fetch information for 'postcode'. Parameters ---------- postcode : string The full (dutch) postcode nr : int The housenumber addition : string (optional) the extension to a housenumber params : dict (optional) a list of parameters to send with the request. returns : a response dictionary """ endpoint = 'rest/addresses/%s/%s' % (postcode, nr) if addition: endpoint += '/' + addition retValue = self._API__request(endpoint, params=params) # then it should match the houseNumberAdditions if addition and addition.upper() not in \ [a.upper() for a in retValue['houseNumberAdditions']]: raise PostcodeError( "ERRHouseNumberAdditionInvalid", {"exceptionId": "ERRHouseNumberAdditionInvalid", "exception": "Invalid housenumber addition: '%s'" % retValue['houseNumberAddition'], "validHouseNumberAdditions": retValue['houseNumberAdditions']}) return retValue
[ "def", "get_postcodedata", "(", "self", ",", "postcode", ",", "nr", ",", "addition", "=", "\"\"", ",", "*", "*", "params", ")", ":", "endpoint", "=", "'rest/addresses/%s/%s'", "%", "(", "postcode", ",", "nr", ")", "if", "addition", ":", "endpoint", "+=", "'/'", "+", "addition", "retValue", "=", "self", ".", "_API__request", "(", "endpoint", ",", "params", "=", "params", ")", "# then it should match the houseNumberAdditions", "if", "addition", "and", "addition", ".", "upper", "(", ")", "not", "in", "[", "a", ".", "upper", "(", ")", "for", "a", "in", "retValue", "[", "'houseNumberAdditions'", "]", "]", ":", "raise", "PostcodeError", "(", "\"ERRHouseNumberAdditionInvalid\"", ",", "{", "\"exceptionId\"", ":", "\"ERRHouseNumberAdditionInvalid\"", ",", "\"exception\"", ":", "\"Invalid housenumber addition: '%s'\"", "%", "retValue", "[", "'houseNumberAddition'", "]", ",", "\"validHouseNumberAdditions\"", ":", "retValue", "[", "'houseNumberAdditions'", "]", "}", ")", "return", "retValue" ]
get_postcodedata - fetch information for 'postcode'. Parameters ---------- postcode : string The full (dutch) postcode nr : int The housenumber addition : string (optional) the extension to a housenumber params : dict (optional) a list of parameters to send with the request. returns : a response dictionary
[ "get_postcodedata", "-", "fetch", "information", "for", "postcode", "." ]
42359cb9402f84a06f7d58f889f1156d653f5ea9
https://github.com/hootnot/postcode-api-wrapper/blob/42359cb9402f84a06f7d58f889f1156d653f5ea9/postcodepy/postcodepy.py#L14-L51
train
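A usage sketch with hypothetical credentials (constructor arguments follow the package README; the postcode and housenumber are the ones used in the package's own tests):

from postcodepy.postcodepy import API, PostcodeError

api = API(environment='live',
          access_key='<your key>', access_secret='<your secret>')
try:
    addr = api.get_postcodedata('1071XX', 1)
    print(addr['city'], addr['street'])
except PostcodeError as e:
    print('lookup failed:', e)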
hootnot/postcode-api-wrapper
postcodepy/postcodepy.py
EndpointsMixin.get_signalcheck
def get_signalcheck(self, sar, **params): """get_signalcheck - perform a signal check. Parameters ---------- sar : dict signal-api-request specified as a dictionary of parameters. All of these parameters are optional. For details check https://api.postcode.nl/documentation/signal-api-example. returns : a response dictionary """ params = sar endpoint = 'rest/signal/check' # The 'sar'-request dictionary should be sent as valid JSON data, so # we need to convert it to JSON # when we construct the request in API.request retValue = self._API__request(endpoint, 'POST', params=params, convJSON=True) return retValue
python
def get_signalcheck(self, sar, **params): """get_signalcheck - perform a signal check. Parameters ---------- sar : dict signal-api-request specified as a dictionary of parameters. All of these parameters are optional. For details check https://api.postcode.nl/documentation/signal-api-example. returns : a response dictionary """ params = sar endpoint = 'rest/signal/check' # The 'sar'-request dictionary should be sent as valid JSON data, so # we need to convert it to JSON # when we construct the request in API.request retValue = self._API__request(endpoint, 'POST', params=params, convJSON=True) return retValue
[ "def", "get_signalcheck", "(", "self", ",", "sar", ",", "*", "*", "params", ")", ":", "params", "=", "sar", "endpoint", "=", "'rest/signal/check'", "# The 'sar'-request dictionary should be sent as valid JSON data, so", "# we need to convert it to JSON", "# when we construct the request in API.request", "retValue", "=", "self", ".", "_API__request", "(", "endpoint", ",", "'POST'", ",", "params", "=", "params", ",", "convJSON", "=", "True", ")", "return", "retValue" ]
get_signalcheck - perform a signal check. Parameters ---------- sar : dict signal-api-request specified as a dictionary of parameters. All of these parameters are optional. For details check https://api.postcode.nl/documentation/signal-api-example. returns : a response dictionary
[ "get_signalcheck", "-", "perform", "a", "signal", "check", "." ]
42359cb9402f84a06f7d58f889f1156d653f5ea9
https://github.com/hootnot/postcode-api-wrapper/blob/42359cb9402f84a06f7d58f889f1156d653f5ea9/postcodepy/postcodepy.py#L53-L75
train
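The signal endpoint takes the whole request as one dict that is JSON-encoded (convJSON=True) and POSTed. A sketch with a minimal request body; the field names follow the signal-api example the docstring links to and should be treated as illustrative:

sar = {
    'customer': {
        'email': 'test-address@postcode.nl',
        'phoneNumber': '+31235325689',
    },
}
report = api.get_signalcheck(sar)   # api constructed as in the sketch above
print(report)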
hootnot/postcode-api-wrapper
postcodepy/postcodepy.py
API.__request
def __request(self, endpoint, method='GET', params=None, convJSON=False): """request - Returns dict of response from postcode.nl API. This method is called only by the EndpointMixin methods. """ url = '%s/%s' % (self.api_url, endpoint) method = method.lower() params = params or {} if convJSON: params = json.dumps(params) func = getattr(self.client, method) request_args = {} if method == 'get': request_args['params'] = params else: request_args['data'] = params try: # Normally some valid HTTP-response will be the case # if not some exception regarding the request / connection has # occurred # this will be one of the exceptions of the request module # if so, we will a PostcodeError exception and pass the request # exception message response = func(url, **request_args) except requests.RequestException as e: raise PostcodeError("ERRrequest", {"exception": e.__doc__}) content = response.content.decode('utf-8') content = json.loads(content) if response.status_code == 200: return content # Errors, otherwise we did not get here ... if 'exceptionId' in content: raise PostcodeError(content['exceptionId'], content) raise PostcodeError("UnknownExceptionFromPostcodeNl")
python
def __request(self, endpoint, method='GET', params=None, convJSON=False): """request - Returns dict of response from postcode.nl API. This method is called only by the EndpointMixin methods. """ url = '%s/%s' % (self.api_url, endpoint) method = method.lower() params = params or {} if convJSON: params = json.dumps(params) func = getattr(self.client, method) request_args = {} if method == 'get': request_args['params'] = params else: request_args['data'] = params try: # Normally some valid HTTP-response will be the case # if not some exception regarding the request / connection has # occurred # this will be one of the exceptions of the request module # if so, we will a PostcodeError exception and pass the request # exception message response = func(url, **request_args) except requests.RequestException as e: raise PostcodeError("ERRrequest", {"exception": e.__doc__}) content = response.content.decode('utf-8') content = json.loads(content) if response.status_code == 200: return content # Errors, otherwise we did not get here ... if 'exceptionId' in content: raise PostcodeError(content['exceptionId'], content) raise PostcodeError("UnknownExceptionFromPostcodeNl")
[ "def", "__request", "(", "self", ",", "endpoint", ",", "method", "=", "'GET'", ",", "params", "=", "None", ",", "convJSON", "=", "False", ")", ":", "url", "=", "'%s/%s'", "%", "(", "self", ".", "api_url", ",", "endpoint", ")", "method", "=", "method", ".", "lower", "(", ")", "params", "=", "params", "or", "{", "}", "if", "convJSON", ":", "params", "=", "json", ".", "dumps", "(", "params", ")", "func", "=", "getattr", "(", "self", ".", "client", ",", "method", ")", "request_args", "=", "{", "}", "if", "method", "==", "'get'", ":", "request_args", "[", "'params'", "]", "=", "params", "else", ":", "request_args", "[", "'data'", "]", "=", "params", "try", ":", "# Normally some valid HTTP-response will be the case", "# if not some exception regarding the request / connection has", "# occurred", "# this will be one of the exceptions of the request module", "# if so, we will a PostcodeError exception and pass the request", "# exception message", "response", "=", "func", "(", "url", ",", "*", "*", "request_args", ")", "except", "requests", ".", "RequestException", "as", "e", ":", "raise", "PostcodeError", "(", "\"ERRrequest\"", ",", "{", "\"exception\"", ":", "e", ".", "__doc__", "}", ")", "content", "=", "response", ".", "content", ".", "decode", "(", "'utf-8'", ")", "content", "=", "json", ".", "loads", "(", "content", ")", "if", "response", ".", "status_code", "==", "200", ":", "return", "content", "# Errors, otherwise we did not get here ...", "if", "'exceptionId'", "in", "content", ":", "raise", "PostcodeError", "(", "content", "[", "'exceptionId'", "]", ",", "content", ")", "raise", "PostcodeError", "(", "\"UnknownExceptionFromPostcodeNl\"", ")" ]
request - Returns dict of response from postcode.nl API. This method is called only by the EndpointsMixin methods.
[ "request", "-", "Returns", "dict", "of", "response", "from", "postcode", ".", "nl", "API", "." ]
42359cb9402f84a06f7d58f889f1156d653f5ea9
https://github.com/hootnot/postcode-api-wrapper/blob/42359cb9402f84a06f7d58f889f1156d653f5ea9/postcodepy/postcodepy.py#L122-L163
train
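Because __request is defined on class API, Python name-mangles it to _API__request, which is why the EndpointsMixin methods above call self._API__request(...) rather than self.__request(...). A self-contained illustration of that mechanism (toy classes, not the package's):

class API:
    def __request(self, endpoint):        # stored on the class as _API__request
        return 'GET %s' % endpoint

class EndpointsMixin:
    def ping(self):
        # a mixin has a different class name, so it must spell out the mangled form
        return self._API__request('ping')

class Client(EndpointsMixin, API):
    pass

assert Client().ping() == 'GET ping'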
totalgood/pugnlp
src/pugnlp/plots.py
regression_and_plot
def regression_and_plot(x, y=None): """ Fit a line to the x, y data supplied and plot it along with teh raw samples >>> age = [25, 26, 33, 29, 27, 21, 26, 35, 21, 37, 21, 38, 18, 19, 36, 30, 29, 24, 24, 36, 36, 27, ... 33, 23, 21, 26, 27, 27, 24, 26, 25, 24, 22, 25, 40, 39, 19, 31, 33, 30, 33, 27, 40, 32, ... 31, 35, 26, 34, 27, 34, 33, 20, 19, 40, 39, 39, 37, 18, 35, 20, 28, 31, 30, 29, 31, 18, ... 40, 20, 32, 20, 34, 34, 25, 29, 40, 40, 39, 36, 39, 34, 34, 35, 39, 38, 33, 32, 21, 29, ... 36, 33, 30, 39, 21, 19, 38, 30, 40, 36, 34, 28, 37, 29, 39, 25, 36, 33, 37, 19, 28, 26, 18, 22, ... 40, 20, 40, 20, 39, 29, 26, 26, 22, 37, 34, 29, 24, 23, 21, 19, 29, 30, 23, 40, 30, 30, 19, 39, ... 39, 25, 36, 38, 24, 32, 34, 33, 36, 30, 35, 26, 28, 23, 25, 23, 40, 20, 26, 26, 22, 23, 18, 36, ... 34, 36, 35, 40, 39, 39, 33, 22, 37, 20, 37, 35, 20, 23, 37, 32, 25, 35, 35, 22, 21, 31, 40, 26, ... 24, 29, 37, 19, 33, 31, 29, 27, 21, 19, 39, 34, 34, 40, 26, 39, 35, 31, 35, 24, 19, 27, 27, 20, ... 28, 30, 23, 21, 20, 26, 31, 24, 25, 25, 22, 32, 28, 36, 21, 38, 18, 25, 21, 33, 40, 19, 38, 33, ... 37, 32, 31, 31, 38, 19, 37, 37, 32, 36, 34, 35, 35, 35, 37, 35, 39, 34, 24, 25, 18, 40, 33, 32, ... 23, 25, 19, 39, 38, 36, 32, 27, 22, 40, 28, 29, 25, 36, 26, 28, 32, 34, 34, 21, 21, 32, 19, 35, ... 30, 35, 26, 31, 38, 34, 33, 35, 37, 38, 36, 40, 22, 30, 28, 28, 29, 36, 24, 28, 28, 28, 26, 21, ... 35, 22, 32, 28, 19, 33, 18, 22, 36, 26, 19, 26, 30, 27, 28, 24, 36, 37, 20, 32, 38, 39, 38, 30, ... 32, 30, 26, 23, 19, 29, 33, 34, 23, 30, 32, 40, 36, 29, 39, 34, 34, 22, 22, 22, 36, 38, 38, 30, ... 26, 40, 34, 21, 34, 38, 32, 35, 35, 26, 28, 20, 40, 23, 24, 26, 24, 39, 21, 33, 31, 39, 39, 20, ... 22, 18, 23, 36, 32, 37, 36, 26, 30, 30, 30, 21, 22, 40, 38, 22, 27, 23, 21, 22, 20, 30, 31, 40, ... 19, 32, 24, 21, 27, 32, 30, 34, 18, 25, 22, 40, 23, 19, 24, 24, 25, 40, 27, 29, 22, 39, 38, 34, ... 39, 30, 31, 33, 34, 25, 20, 20, 20, 20, 24, 19, 21, 31, 31, 29, 38, 39, 33, 40, 24, 38, 37, 18, ... 24, 38, 38, 22, 40, 21, 36, 30, 21, 30, 35, 20, 25, 25, 29, 30, 20, 29, 29, 31, 20, 26, 26, 38, ... 37, 39, 31, 35, 36, 30, 38, 36, 23, 39, 39, 20, 30, 34, 21, 23, 21, 33, 30, 33, 32, 36, 18, 31, ... 32, 25, 23, 23, 21, 34, 18, 40, 21, 29, 29, 21, 38, 35, 38, 32, 38, 27, 23, 33, 29, 19, 20, 35, ... 29, 27, 28, 20, 40, 35, 40, 40, 20, 36, 38, 28, 30, 30, 36, 29, 27, 25, 33, 19, 27, 28, 34, 36, ... 27, 40, 38, 37, 31, 33, 38, 36, 25, 23, 22, 23, 34, 26, 24, 28, 32, 22, 18, 29, 19, 21, 27, 28, ... 35, 30, 40, 28, 37, 34, 24, 40, 33, 29, 30, 36, 25, 26, 26, 28, 34, 39, 34, 26, 24, 33, 38, 37, ... 36, 34, 37, 33, 25, 27, 30, 26, 21, 40, 26, 25, 25, 40, 28, 35, 36, 39, 33, 36, 40, 32, 36, 26, ... 24, 36, 27, 28, 26, 37, 36, 37, 36, 20, 34, 30, 32, 40, 20, 31, 23, 27, 19, 24, 23, 24, 25, 36, ... 26, 33, 30, 27, 26, 28, 28, 21, 31, 24, 27, 24, 29, 29, 28, 22, 20, 23, 35, 30, 37, 31, 31, 21, ... 32, 29, 27, 27, 30, 39, 34, 23, 35, 39, 27, 40, 28, 36, 35, 38, 21, 18, 21, 38, 37, 24, 21, 25, ... 35, 27, 35, 24, 36, 32, 20] >>> wage = [17000, 13000, 28000, 45000, 28000, 1200, 15500, 26400, 14000, 35000, 16400, 50000, 2600, 9000, ... 27000, 150000, 32000, 22000, 65000, 56000, 6500, 30000, 70000, 9000, 6000, 34000, 40000, 30000, ... 6400, 87000, 20000, 45000, 4800, 34000, 75000, 26000, 4000, 50000, 63000, 14700, 45000, 42000, ... 10000, 40000, 70000, 14000, 54000, 14000, 23000, 24400, 27900, 4700, 8000, 19000, 17300, 45000, ... 3900, 2900, 138000, 2100, 60000, 55000, 45000, 40000, 45700, 90000, 40000, 13000, 30000, 2000, ... 
75000, 60000, 70000, 41000, 42000, 31000, 39000, 104000, 52000, 20000, 59000, 66000, 63000, 32000, ... 11000, 16000, 6400, 17000, 47700, 5000, 25000, 35000, 20000, 14000, 29000, 267000, 31000, 27000, ... 64000, 39600, 267000, 7100, 33000, 31500, 40000, 23000, 3000, 14000, 44000, 15100, 2600, 6200, ... 50000, 3000, 25000, 2000, 38000, 22000, 20000, 2500, 1500, 42000, 30000, 27000, 7000, 11900, 27000, ... 24000, 4300, 30200, 2500, 30000, 70000, 38700, 8000, 36000, 66000, 24000, 95000, 39000, 20000, 23000, ... 56000, 25200, 62000, 12000, 13000, 35000, 35000, 14000, 24000, 12000, 14000, 31000, 40000, 22900, 12000, ... 14000, 1600, 12000, 80000, 90000, 126000, 1600, 100000, 8000, 71000, 40000, 42000, 40000, 120000, 35000, ... 1200, 4000, 32000, 8000, 14500, 65000, 15000, 3000, 2000, 23900, 1000, 22000, 18200, 8000, 30000, 23000, ... 30000, 27000, 70000, 40000, 18000, 3100, 57000, 25000, 32000, 10000, 4000, 49000, 93000, 35000, 49000, ... 40000, 5500, 30000, 25000, 5700, 6000, 30000, 42900, 8000, 5300, 90000, 85000, 15000, 17000, 5600, ... 11500, 52000, 1000, 42000, 2100, 50000, 1500, 40000, 28000, 5300, 149000, 3200, 12000, 83000, 45000, ... 31200, 25000, 72000, 70000, 7000, 23000, 40000, 40000, 28000, 10000, 48000, 20000, 60000, 19000, 25000, ... 39000, 68000, 2300, 23900, 5000, 16300, 80000, 45000, 12000, 9000, 1300, 35000, 35000, 47000, 32000, ... 18000, 20000, 20000, 23400, 48000, 8000, 5200, 33500, 22000, 22000, 52000, 104000, 28000, 13000, 12000, ... 15000, 53000, 27000, 50000, 13900, 23000, 28100, 23000, 12000, 55000, 83000, 31000, 33200, 45000, 3000, ... 18000, 11000, 41000, 36000, 33600, 38000, 45000, 53000, 24000, 3000, 37500, 7700, 4800, 29000, 6600, ... 12400, 20000, 2000, 1100, 55000, 13400, 10000, 6000, 6000, 16000, 19000, 8300, 52000, 58000, 27000, ... 25000, 80000, 10000, 22000, 18000, 21000, 8000, 15200, 15000, 5000, 50000, 89000, 7000, 65000, 58000, ... 42000, 55000, 40000, 14000, 36000, 30000, 7900, 6000, 1200, 10000, 54000, 12800, 35000, 34000, 40000, ... 45000, 9600, 3300, 39000, 22000, 40000, 68000, 24400, 1000, 10800, 8400, 50000, 22000, 20000, 20000, ... 1300, 9000, 14200, 32000, 65000, 18000, 18000, 3000, 16700, 1500, 1400, 15000, 55000, 42000, 70000, ... 35000, 21600, 5800, 35000, 5700, 1700, 40000, 40000, 45000, 25000, 13000, 6400, 11000, 4200, 30000, ... 32000, 120000, 10000, 19000, 12000, 13000, 37000, 40000, 38000, 60000, 3100, 16000, 18000, 130000, ... 5000, 5000, 35000, 1000, 14300, 100000, 20000, 33000, 8000, 9400, 87000, 2500, 12000, 12000, 33000, ... 16500, 25500, 7200, 2300, 3100, 2100, 3200, 45000, 40000, 3800, 30000, 12000, 62000, 45000, 46000, ... 50000, 40000, 13000, 50000, 23000, 4000, 40000, 25000, 16000, 3000, 80000, 27000, 68000, 3500, ... 1300, 10000, 46000, 5800, 24000, 12500, 50000, 48000, 29000, 19000, 26000, 30000, 10000, 10000, ... 20000, 43000, 105000, 55000, 5000, 65000, 68000, 38000, 47000, 48700, 6100, 55000, 30000, 5000, 3500, ... 23400, 11400, 7000, 1300, 80000, 65000, 45000, 19000, 3000, 17100, 22900, 31200, 35000, 3000, 5000, ... 1000, 36000, 4800, 60000, 9800, 30000, 85000, 18000, 24000, 60000, 30000, 2000, 39000, 12000, 10500, ... 60000, 36000, 10500, 3600, 1200, 28600, 48000, 20800, 5400, 9600, 30000, 30000, 20000, 6700, 30000, ... 3200, 42000, 37000, 5000, 18000, 20000, 14000, 12000, 18000, 3000, 13500, 35000, 38000, 30000, 36000, ... 66000, 45000, 32000, 46000, 80000, 27000, 4000, 21000, 7600, 16000, 10300, 27000, 19000, 14000, 19000, ... 
3100, 20000, 2700, 27000, 7000, 13600, 75000, 35000, 36000, 25000, 6000, 36000, 50000, 46000, 3000, ... 37000, 40000, 30000, 48800, 19700, 16000, 14000, 12000, 25000, 25000, 28600, 17000, 31200, 57000, ... 23000, 23500, 46000, 18700, 26700, 9900, 16000, 3000, 52000, 51000, 14000, 14400, 27000, 26000, 60000, ... 25000, 6000, 20000, 3000, 69000, 24800, 12000, 3100, 18000, 20000, 267000, 28000, 9800, 18200, 80000, ... 6800, 21100, 20000, 68000, 20000, 45000, 8000, 40000, 31900, 28000, 24000, 2000, 32000, 11000, 20000, ... 5900, 16100, 23900, 40000, 37500, 11000, 55000, 37500, 60000, 23000, 9500, 34500, 4000, 9000, 11200, ... 35200, 30000, 18000, 21800, 19700, 16700, 12500, 11300, 4000, 39000, 32000, 14000, 65000, 50000, ... 2000, 30400, 22000, 1600, 56000, 40000, 85000, 9000, 10000, 19000, 5300, 5200, 43000, 60000, 50000, ... 38000, 267000, 15600, 1800, 17000, 45000, 31000, 5000, 8000, 43000, 103000, 45000, 8800, 26000, 47000, ... 40000, 8000] >>> # Udacity data shows that people earn $1.8K more for each year of age and start with a $21K deficit >>> regress(age, wage) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE array([22214.93338944, ...) >> # Gainseville, FL census data shows 14 more new homes are built each year, starting with 517 completed in 1991 >> poly = regress([483, 576, 529, 551, 529, 551, 663, 639, 704, 675, 601, 621, 630, 778, 831, 610]) """ if y is None: y = x x = range(len(x)) if not isinstance(x[0], (float, int, np.float64, np.float32)): x = [row[0] for row in x] A = np.vstack([np.array(x), np.ones(len(x))]).T fit = np.linalg.lstsq(A, y, rcond=None) # if fit is None: # fit = [(1, 0), None, None, None] poly = fit[0][0], fit[0][-1] poly = regressionplot(x, y, poly) return poly
python
def regression_and_plot(x, y=None): """ Fit a line to the x, y data supplied and plot it along with teh raw samples >>> age = [25, 26, 33, 29, 27, 21, 26, 35, 21, 37, 21, 38, 18, 19, 36, 30, 29, 24, 24, 36, 36, 27, ... 33, 23, 21, 26, 27, 27, 24, 26, 25, 24, 22, 25, 40, 39, 19, 31, 33, 30, 33, 27, 40, 32, ... 31, 35, 26, 34, 27, 34, 33, 20, 19, 40, 39, 39, 37, 18, 35, 20, 28, 31, 30, 29, 31, 18, ... 40, 20, 32, 20, 34, 34, 25, 29, 40, 40, 39, 36, 39, 34, 34, 35, 39, 38, 33, 32, 21, 29, ... 36, 33, 30, 39, 21, 19, 38, 30, 40, 36, 34, 28, 37, 29, 39, 25, 36, 33, 37, 19, 28, 26, 18, 22, ... 40, 20, 40, 20, 39, 29, 26, 26, 22, 37, 34, 29, 24, 23, 21, 19, 29, 30, 23, 40, 30, 30, 19, 39, ... 39, 25, 36, 38, 24, 32, 34, 33, 36, 30, 35, 26, 28, 23, 25, 23, 40, 20, 26, 26, 22, 23, 18, 36, ... 34, 36, 35, 40, 39, 39, 33, 22, 37, 20, 37, 35, 20, 23, 37, 32, 25, 35, 35, 22, 21, 31, 40, 26, ... 24, 29, 37, 19, 33, 31, 29, 27, 21, 19, 39, 34, 34, 40, 26, 39, 35, 31, 35, 24, 19, 27, 27, 20, ... 28, 30, 23, 21, 20, 26, 31, 24, 25, 25, 22, 32, 28, 36, 21, 38, 18, 25, 21, 33, 40, 19, 38, 33, ... 37, 32, 31, 31, 38, 19, 37, 37, 32, 36, 34, 35, 35, 35, 37, 35, 39, 34, 24, 25, 18, 40, 33, 32, ... 23, 25, 19, 39, 38, 36, 32, 27, 22, 40, 28, 29, 25, 36, 26, 28, 32, 34, 34, 21, 21, 32, 19, 35, ... 30, 35, 26, 31, 38, 34, 33, 35, 37, 38, 36, 40, 22, 30, 28, 28, 29, 36, 24, 28, 28, 28, 26, 21, ... 35, 22, 32, 28, 19, 33, 18, 22, 36, 26, 19, 26, 30, 27, 28, 24, 36, 37, 20, 32, 38, 39, 38, 30, ... 32, 30, 26, 23, 19, 29, 33, 34, 23, 30, 32, 40, 36, 29, 39, 34, 34, 22, 22, 22, 36, 38, 38, 30, ... 26, 40, 34, 21, 34, 38, 32, 35, 35, 26, 28, 20, 40, 23, 24, 26, 24, 39, 21, 33, 31, 39, 39, 20, ... 22, 18, 23, 36, 32, 37, 36, 26, 30, 30, 30, 21, 22, 40, 38, 22, 27, 23, 21, 22, 20, 30, 31, 40, ... 19, 32, 24, 21, 27, 32, 30, 34, 18, 25, 22, 40, 23, 19, 24, 24, 25, 40, 27, 29, 22, 39, 38, 34, ... 39, 30, 31, 33, 34, 25, 20, 20, 20, 20, 24, 19, 21, 31, 31, 29, 38, 39, 33, 40, 24, 38, 37, 18, ... 24, 38, 38, 22, 40, 21, 36, 30, 21, 30, 35, 20, 25, 25, 29, 30, 20, 29, 29, 31, 20, 26, 26, 38, ... 37, 39, 31, 35, 36, 30, 38, 36, 23, 39, 39, 20, 30, 34, 21, 23, 21, 33, 30, 33, 32, 36, 18, 31, ... 32, 25, 23, 23, 21, 34, 18, 40, 21, 29, 29, 21, 38, 35, 38, 32, 38, 27, 23, 33, 29, 19, 20, 35, ... 29, 27, 28, 20, 40, 35, 40, 40, 20, 36, 38, 28, 30, 30, 36, 29, 27, 25, 33, 19, 27, 28, 34, 36, ... 27, 40, 38, 37, 31, 33, 38, 36, 25, 23, 22, 23, 34, 26, 24, 28, 32, 22, 18, 29, 19, 21, 27, 28, ... 35, 30, 40, 28, 37, 34, 24, 40, 33, 29, 30, 36, 25, 26, 26, 28, 34, 39, 34, 26, 24, 33, 38, 37, ... 36, 34, 37, 33, 25, 27, 30, 26, 21, 40, 26, 25, 25, 40, 28, 35, 36, 39, 33, 36, 40, 32, 36, 26, ... 24, 36, 27, 28, 26, 37, 36, 37, 36, 20, 34, 30, 32, 40, 20, 31, 23, 27, 19, 24, 23, 24, 25, 36, ... 26, 33, 30, 27, 26, 28, 28, 21, 31, 24, 27, 24, 29, 29, 28, 22, 20, 23, 35, 30, 37, 31, 31, 21, ... 32, 29, 27, 27, 30, 39, 34, 23, 35, 39, 27, 40, 28, 36, 35, 38, 21, 18, 21, 38, 37, 24, 21, 25, ... 35, 27, 35, 24, 36, 32, 20] >>> wage = [17000, 13000, 28000, 45000, 28000, 1200, 15500, 26400, 14000, 35000, 16400, 50000, 2600, 9000, ... 27000, 150000, 32000, 22000, 65000, 56000, 6500, 30000, 70000, 9000, 6000, 34000, 40000, 30000, ... 6400, 87000, 20000, 45000, 4800, 34000, 75000, 26000, 4000, 50000, 63000, 14700, 45000, 42000, ... 10000, 40000, 70000, 14000, 54000, 14000, 23000, 24400, 27900, 4700, 8000, 19000, 17300, 45000, ... 3900, 2900, 138000, 2100, 60000, 55000, 45000, 40000, 45700, 90000, 40000, 13000, 30000, 2000, ... 
75000, 60000, 70000, 41000, 42000, 31000, 39000, 104000, 52000, 20000, 59000, 66000, 63000, 32000, ... 11000, 16000, 6400, 17000, 47700, 5000, 25000, 35000, 20000, 14000, 29000, 267000, 31000, 27000, ... 64000, 39600, 267000, 7100, 33000, 31500, 40000, 23000, 3000, 14000, 44000, 15100, 2600, 6200, ... 50000, 3000, 25000, 2000, 38000, 22000, 20000, 2500, 1500, 42000, 30000, 27000, 7000, 11900, 27000, ... 24000, 4300, 30200, 2500, 30000, 70000, 38700, 8000, 36000, 66000, 24000, 95000, 39000, 20000, 23000, ... 56000, 25200, 62000, 12000, 13000, 35000, 35000, 14000, 24000, 12000, 14000, 31000, 40000, 22900, 12000, ... 14000, 1600, 12000, 80000, 90000, 126000, 1600, 100000, 8000, 71000, 40000, 42000, 40000, 120000, 35000, ... 1200, 4000, 32000, 8000, 14500, 65000, 15000, 3000, 2000, 23900, 1000, 22000, 18200, 8000, 30000, 23000, ... 30000, 27000, 70000, 40000, 18000, 3100, 57000, 25000, 32000, 10000, 4000, 49000, 93000, 35000, 49000, ... 40000, 5500, 30000, 25000, 5700, 6000, 30000, 42900, 8000, 5300, 90000, 85000, 15000, 17000, 5600, ... 11500, 52000, 1000, 42000, 2100, 50000, 1500, 40000, 28000, 5300, 149000, 3200, 12000, 83000, 45000, ... 31200, 25000, 72000, 70000, 7000, 23000, 40000, 40000, 28000, 10000, 48000, 20000, 60000, 19000, 25000, ... 39000, 68000, 2300, 23900, 5000, 16300, 80000, 45000, 12000, 9000, 1300, 35000, 35000, 47000, 32000, ... 18000, 20000, 20000, 23400, 48000, 8000, 5200, 33500, 22000, 22000, 52000, 104000, 28000, 13000, 12000, ... 15000, 53000, 27000, 50000, 13900, 23000, 28100, 23000, 12000, 55000, 83000, 31000, 33200, 45000, 3000, ... 18000, 11000, 41000, 36000, 33600, 38000, 45000, 53000, 24000, 3000, 37500, 7700, 4800, 29000, 6600, ... 12400, 20000, 2000, 1100, 55000, 13400, 10000, 6000, 6000, 16000, 19000, 8300, 52000, 58000, 27000, ... 25000, 80000, 10000, 22000, 18000, 21000, 8000, 15200, 15000, 5000, 50000, 89000, 7000, 65000, 58000, ... 42000, 55000, 40000, 14000, 36000, 30000, 7900, 6000, 1200, 10000, 54000, 12800, 35000, 34000, 40000, ... 45000, 9600, 3300, 39000, 22000, 40000, 68000, 24400, 1000, 10800, 8400, 50000, 22000, 20000, 20000, ... 1300, 9000, 14200, 32000, 65000, 18000, 18000, 3000, 16700, 1500, 1400, 15000, 55000, 42000, 70000, ... 35000, 21600, 5800, 35000, 5700, 1700, 40000, 40000, 45000, 25000, 13000, 6400, 11000, 4200, 30000, ... 32000, 120000, 10000, 19000, 12000, 13000, 37000, 40000, 38000, 60000, 3100, 16000, 18000, 130000, ... 5000, 5000, 35000, 1000, 14300, 100000, 20000, 33000, 8000, 9400, 87000, 2500, 12000, 12000, 33000, ... 16500, 25500, 7200, 2300, 3100, 2100, 3200, 45000, 40000, 3800, 30000, 12000, 62000, 45000, 46000, ... 50000, 40000, 13000, 50000, 23000, 4000, 40000, 25000, 16000, 3000, 80000, 27000, 68000, 3500, ... 1300, 10000, 46000, 5800, 24000, 12500, 50000, 48000, 29000, 19000, 26000, 30000, 10000, 10000, ... 20000, 43000, 105000, 55000, 5000, 65000, 68000, 38000, 47000, 48700, 6100, 55000, 30000, 5000, 3500, ... 23400, 11400, 7000, 1300, 80000, 65000, 45000, 19000, 3000, 17100, 22900, 31200, 35000, 3000, 5000, ... 1000, 36000, 4800, 60000, 9800, 30000, 85000, 18000, 24000, 60000, 30000, 2000, 39000, 12000, 10500, ... 60000, 36000, 10500, 3600, 1200, 28600, 48000, 20800, 5400, 9600, 30000, 30000, 20000, 6700, 30000, ... 3200, 42000, 37000, 5000, 18000, 20000, 14000, 12000, 18000, 3000, 13500, 35000, 38000, 30000, 36000, ... 66000, 45000, 32000, 46000, 80000, 27000, 4000, 21000, 7600, 16000, 10300, 27000, 19000, 14000, 19000, ... 
3100, 20000, 2700, 27000, 7000, 13600, 75000, 35000, 36000, 25000, 6000, 36000, 50000, 46000, 3000, ... 37000, 40000, 30000, 48800, 19700, 16000, 14000, 12000, 25000, 25000, 28600, 17000, 31200, 57000, ... 23000, 23500, 46000, 18700, 26700, 9900, 16000, 3000, 52000, 51000, 14000, 14400, 27000, 26000, 60000, ... 25000, 6000, 20000, 3000, 69000, 24800, 12000, 3100, 18000, 20000, 267000, 28000, 9800, 18200, 80000, ... 6800, 21100, 20000, 68000, 20000, 45000, 8000, 40000, 31900, 28000, 24000, 2000, 32000, 11000, 20000, ... 5900, 16100, 23900, 40000, 37500, 11000, 55000, 37500, 60000, 23000, 9500, 34500, 4000, 9000, 11200, ... 35200, 30000, 18000, 21800, 19700, 16700, 12500, 11300, 4000, 39000, 32000, 14000, 65000, 50000, ... 2000, 30400, 22000, 1600, 56000, 40000, 85000, 9000, 10000, 19000, 5300, 5200, 43000, 60000, 50000, ... 38000, 267000, 15600, 1800, 17000, 45000, 31000, 5000, 8000, 43000, 103000, 45000, 8800, 26000, 47000, ... 40000, 8000] >>> # Udacity data shows that people earn $1.8K more for each year of age and start with a $21K deficit >>> regress(age, wage) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE array([22214.93338944, ...) >> # Gainseville, FL census data shows 14 more new homes are built each year, starting with 517 completed in 1991 >> poly = regress([483, 576, 529, 551, 529, 551, 663, 639, 704, 675, 601, 621, 630, 778, 831, 610]) """ if y is None: y = x x = range(len(x)) if not isinstance(x[0], (float, int, np.float64, np.float32)): x = [row[0] for row in x] A = np.vstack([np.array(x), np.ones(len(x))]).T fit = np.linalg.lstsq(A, y, rcond=None) # if fit is None: # fit = [(1, 0), None, None, None] poly = fit[0][0], fit[0][-1] poly = regressionplot(x, y, poly) return poly
[ "def", "regression_and_plot", "(", "x", ",", "y", "=", "None", ")", ":", "if", "y", "is", "None", ":", "y", "=", "x", "x", "=", "range", "(", "len", "(", "x", ")", ")", "if", "not", "isinstance", "(", "x", "[", "0", "]", ",", "(", "float", ",", "int", ",", "np", ".", "float64", ",", "np", ".", "float32", ")", ")", ":", "x", "=", "[", "row", "[", "0", "]", "for", "row", "in", "x", "]", "A", "=", "np", ".", "vstack", "(", "[", "np", ".", "array", "(", "x", ")", ",", "np", ".", "ones", "(", "len", "(", "x", ")", ")", "]", ")", ".", "T", "fit", "=", "np", ".", "linalg", ".", "lstsq", "(", "A", ",", "y", ",", "rcond", "=", "None", ")", "# if fit is None:", "# fit = [(1, 0), None, None, None]", "poly", "=", "fit", "[", "0", "]", "[", "0", "]", ",", "fit", "[", "0", "]", "[", "-", "1", "]", "poly", "=", "regressionplot", "(", "x", ",", "y", ",", "poly", ")", "return", "poly" ]
Fit a line to the x, y data supplied and plot it along with teh raw samples >>> age = [25, 26, 33, 29, 27, 21, 26, 35, 21, 37, 21, 38, 18, 19, 36, 30, 29, 24, 24, 36, 36, 27, ... 33, 23, 21, 26, 27, 27, 24, 26, 25, 24, 22, 25, 40, 39, 19, 31, 33, 30, 33, 27, 40, 32, ... 31, 35, 26, 34, 27, 34, 33, 20, 19, 40, 39, 39, 37, 18, 35, 20, 28, 31, 30, 29, 31, 18, ... 40, 20, 32, 20, 34, 34, 25, 29, 40, 40, 39, 36, 39, 34, 34, 35, 39, 38, 33, 32, 21, 29, ... 36, 33, 30, 39, 21, 19, 38, 30, 40, 36, 34, 28, 37, 29, 39, 25, 36, 33, 37, 19, 28, 26, 18, 22, ... 40, 20, 40, 20, 39, 29, 26, 26, 22, 37, 34, 29, 24, 23, 21, 19, 29, 30, 23, 40, 30, 30, 19, 39, ... 39, 25, 36, 38, 24, 32, 34, 33, 36, 30, 35, 26, 28, 23, 25, 23, 40, 20, 26, 26, 22, 23, 18, 36, ... 34, 36, 35, 40, 39, 39, 33, 22, 37, 20, 37, 35, 20, 23, 37, 32, 25, 35, 35, 22, 21, 31, 40, 26, ... 24, 29, 37, 19, 33, 31, 29, 27, 21, 19, 39, 34, 34, 40, 26, 39, 35, 31, 35, 24, 19, 27, 27, 20, ... 28, 30, 23, 21, 20, 26, 31, 24, 25, 25, 22, 32, 28, 36, 21, 38, 18, 25, 21, 33, 40, 19, 38, 33, ... 37, 32, 31, 31, 38, 19, 37, 37, 32, 36, 34, 35, 35, 35, 37, 35, 39, 34, 24, 25, 18, 40, 33, 32, ... 23, 25, 19, 39, 38, 36, 32, 27, 22, 40, 28, 29, 25, 36, 26, 28, 32, 34, 34, 21, 21, 32, 19, 35, ... 30, 35, 26, 31, 38, 34, 33, 35, 37, 38, 36, 40, 22, 30, 28, 28, 29, 36, 24, 28, 28, 28, 26, 21, ... 35, 22, 32, 28, 19, 33, 18, 22, 36, 26, 19, 26, 30, 27, 28, 24, 36, 37, 20, 32, 38, 39, 38, 30, ... 32, 30, 26, 23, 19, 29, 33, 34, 23, 30, 32, 40, 36, 29, 39, 34, 34, 22, 22, 22, 36, 38, 38, 30, ... 26, 40, 34, 21, 34, 38, 32, 35, 35, 26, 28, 20, 40, 23, 24, 26, 24, 39, 21, 33, 31, 39, 39, 20, ... 22, 18, 23, 36, 32, 37, 36, 26, 30, 30, 30, 21, 22, 40, 38, 22, 27, 23, 21, 22, 20, 30, 31, 40, ... 19, 32, 24, 21, 27, 32, 30, 34, 18, 25, 22, 40, 23, 19, 24, 24, 25, 40, 27, 29, 22, 39, 38, 34, ... 39, 30, 31, 33, 34, 25, 20, 20, 20, 20, 24, 19, 21, 31, 31, 29, 38, 39, 33, 40, 24, 38, 37, 18, ... 24, 38, 38, 22, 40, 21, 36, 30, 21, 30, 35, 20, 25, 25, 29, 30, 20, 29, 29, 31, 20, 26, 26, 38, ... 37, 39, 31, 35, 36, 30, 38, 36, 23, 39, 39, 20, 30, 34, 21, 23, 21, 33, 30, 33, 32, 36, 18, 31, ... 32, 25, 23, 23, 21, 34, 18, 40, 21, 29, 29, 21, 38, 35, 38, 32, 38, 27, 23, 33, 29, 19, 20, 35, ... 29, 27, 28, 20, 40, 35, 40, 40, 20, 36, 38, 28, 30, 30, 36, 29, 27, 25, 33, 19, 27, 28, 34, 36, ... 27, 40, 38, 37, 31, 33, 38, 36, 25, 23, 22, 23, 34, 26, 24, 28, 32, 22, 18, 29, 19, 21, 27, 28, ... 35, 30, 40, 28, 37, 34, 24, 40, 33, 29, 30, 36, 25, 26, 26, 28, 34, 39, 34, 26, 24, 33, 38, 37, ... 36, 34, 37, 33, 25, 27, 30, 26, 21, 40, 26, 25, 25, 40, 28, 35, 36, 39, 33, 36, 40, 32, 36, 26, ... 24, 36, 27, 28, 26, 37, 36, 37, 36, 20, 34, 30, 32, 40, 20, 31, 23, 27, 19, 24, 23, 24, 25, 36, ... 26, 33, 30, 27, 26, 28, 28, 21, 31, 24, 27, 24, 29, 29, 28, 22, 20, 23, 35, 30, 37, 31, 31, 21, ... 32, 29, 27, 27, 30, 39, 34, 23, 35, 39, 27, 40, 28, 36, 35, 38, 21, 18, 21, 38, 37, 24, 21, 25, ... 35, 27, 35, 24, 36, 32, 20] >>> wage = [17000, 13000, 28000, 45000, 28000, 1200, 15500, 26400, 14000, 35000, 16400, 50000, 2600, 9000, ... 27000, 150000, 32000, 22000, 65000, 56000, 6500, 30000, 70000, 9000, 6000, 34000, 40000, 30000, ... 6400, 87000, 20000, 45000, 4800, 34000, 75000, 26000, 4000, 50000, 63000, 14700, 45000, 42000, ... 10000, 40000, 70000, 14000, 54000, 14000, 23000, 24400, 27900, 4700, 8000, 19000, 17300, 45000, ... 3900, 2900, 138000, 2100, 60000, 55000, 45000, 40000, 45700, 90000, 40000, 13000, 30000, 2000, ... 
75000, 60000, 70000, 41000, 42000, 31000, 39000, 104000, 52000, 20000, 59000, 66000, 63000, 32000, ... 11000, 16000, 6400, 17000, 47700, 5000, 25000, 35000, 20000, 14000, 29000, 267000, 31000, 27000, ... 64000, 39600, 267000, 7100, 33000, 31500, 40000, 23000, 3000, 14000, 44000, 15100, 2600, 6200, ... 50000, 3000, 25000, 2000, 38000, 22000, 20000, 2500, 1500, 42000, 30000, 27000, 7000, 11900, 27000, ... 24000, 4300, 30200, 2500, 30000, 70000, 38700, 8000, 36000, 66000, 24000, 95000, 39000, 20000, 23000, ... 56000, 25200, 62000, 12000, 13000, 35000, 35000, 14000, 24000, 12000, 14000, 31000, 40000, 22900, 12000, ... 14000, 1600, 12000, 80000, 90000, 126000, 1600, 100000, 8000, 71000, 40000, 42000, 40000, 120000, 35000, ... 1200, 4000, 32000, 8000, 14500, 65000, 15000, 3000, 2000, 23900, 1000, 22000, 18200, 8000, 30000, 23000, ... 30000, 27000, 70000, 40000, 18000, 3100, 57000, 25000, 32000, 10000, 4000, 49000, 93000, 35000, 49000, ... 40000, 5500, 30000, 25000, 5700, 6000, 30000, 42900, 8000, 5300, 90000, 85000, 15000, 17000, 5600, ... 11500, 52000, 1000, 42000, 2100, 50000, 1500, 40000, 28000, 5300, 149000, 3200, 12000, 83000, 45000, ... 31200, 25000, 72000, 70000, 7000, 23000, 40000, 40000, 28000, 10000, 48000, 20000, 60000, 19000, 25000, ... 39000, 68000, 2300, 23900, 5000, 16300, 80000, 45000, 12000, 9000, 1300, 35000, 35000, 47000, 32000, ... 18000, 20000, 20000, 23400, 48000, 8000, 5200, 33500, 22000, 22000, 52000, 104000, 28000, 13000, 12000, ... 15000, 53000, 27000, 50000, 13900, 23000, 28100, 23000, 12000, 55000, 83000, 31000, 33200, 45000, 3000, ... 18000, 11000, 41000, 36000, 33600, 38000, 45000, 53000, 24000, 3000, 37500, 7700, 4800, 29000, 6600, ... 12400, 20000, 2000, 1100, 55000, 13400, 10000, 6000, 6000, 16000, 19000, 8300, 52000, 58000, 27000, ... 25000, 80000, 10000, 22000, 18000, 21000, 8000, 15200, 15000, 5000, 50000, 89000, 7000, 65000, 58000, ... 42000, 55000, 40000, 14000, 36000, 30000, 7900, 6000, 1200, 10000, 54000, 12800, 35000, 34000, 40000, ... 45000, 9600, 3300, 39000, 22000, 40000, 68000, 24400, 1000, 10800, 8400, 50000, 22000, 20000, 20000, ... 1300, 9000, 14200, 32000, 65000, 18000, 18000, 3000, 16700, 1500, 1400, 15000, 55000, 42000, 70000, ... 35000, 21600, 5800, 35000, 5700, 1700, 40000, 40000, 45000, 25000, 13000, 6400, 11000, 4200, 30000, ... 32000, 120000, 10000, 19000, 12000, 13000, 37000, 40000, 38000, 60000, 3100, 16000, 18000, 130000, ... 5000, 5000, 35000, 1000, 14300, 100000, 20000, 33000, 8000, 9400, 87000, 2500, 12000, 12000, 33000, ... 16500, 25500, 7200, 2300, 3100, 2100, 3200, 45000, 40000, 3800, 30000, 12000, 62000, 45000, 46000, ... 50000, 40000, 13000, 50000, 23000, 4000, 40000, 25000, 16000, 3000, 80000, 27000, 68000, 3500, ... 1300, 10000, 46000, 5800, 24000, 12500, 50000, 48000, 29000, 19000, 26000, 30000, 10000, 10000, ... 20000, 43000, 105000, 55000, 5000, 65000, 68000, 38000, 47000, 48700, 6100, 55000, 30000, 5000, 3500, ... 23400, 11400, 7000, 1300, 80000, 65000, 45000, 19000, 3000, 17100, 22900, 31200, 35000, 3000, 5000, ... 1000, 36000, 4800, 60000, 9800, 30000, 85000, 18000, 24000, 60000, 30000, 2000, 39000, 12000, 10500, ... 60000, 36000, 10500, 3600, 1200, 28600, 48000, 20800, 5400, 9600, 30000, 30000, 20000, 6700, 30000, ... 3200, 42000, 37000, 5000, 18000, 20000, 14000, 12000, 18000, 3000, 13500, 35000, 38000, 30000, 36000, ... 66000, 45000, 32000, 46000, 80000, 27000, 4000, 21000, 7600, 16000, 10300, 27000, 19000, 14000, 19000, ... 
3100, 20000, 2700, 27000, 7000, 13600, 75000, 35000, 36000, 25000, 6000, 36000, 50000, 46000, 3000, ... 37000, 40000, 30000, 48800, 19700, 16000, 14000, 12000, 25000, 25000, 28600, 17000, 31200, 57000, ... 23000, 23500, 46000, 18700, 26700, 9900, 16000, 3000, 52000, 51000, 14000, 14400, 27000, 26000, 60000, ... 25000, 6000, 20000, 3000, 69000, 24800, 12000, 3100, 18000, 20000, 267000, 28000, 9800, 18200, 80000, ... 6800, 21100, 20000, 68000, 20000, 45000, 8000, 40000, 31900, 28000, 24000, 2000, 32000, 11000, 20000, ... 5900, 16100, 23900, 40000, 37500, 11000, 55000, 37500, 60000, 23000, 9500, 34500, 4000, 9000, 11200, ... 35200, 30000, 18000, 21800, 19700, 16700, 12500, 11300, 4000, 39000, 32000, 14000, 65000, 50000, ... 2000, 30400, 22000, 1600, 56000, 40000, 85000, 9000, 10000, 19000, 5300, 5200, 43000, 60000, 50000, ... 38000, 267000, 15600, 1800, 17000, 45000, 31000, 5000, 8000, 43000, 103000, 45000, 8800, 26000, 47000, ... 40000, 8000] >>> # Udacity data shows that people earn $1.8K more for each year of age and start with a $21K deficit >>> regress(age, wage) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE array([22214.93338944, ...) >> # Gainseville, FL census data shows 14 more new homes are built each year, starting with 517 completed in 1991 >> poly = regress([483, 576, 529, 551, 529, 551, 663, 639, 704, 675, 601, 621, 630, 778, 831, 610])
[ "Fit", "a", "line", "to", "the", "x", "y", "data", "supplied", "and", "plot", "it", "along", "with", "the", "raw", "samples" ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/plots.py#L125-L225
train
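The core of regression_and_plot is an ordinary least-squares line fit via np.linalg.lstsq; a minimal sketch of that step in isolation, using made-up sample points instead of the age/wage arrays (regressionplot and the plotting half are omitted):

import numpy as np

x = [0, 1, 2, 3, 4]
y = [1.1, 2.9, 5.2, 6.8, 9.1]
# design matrix with a column of ones so lstsq solves for slope and intercept together
A = np.vstack([np.array(x), np.ones(len(x))]).T
fit = np.linalg.lstsq(A, y, rcond=None)
slope, intercept = fit[0][0], fit[0][-1]
print(slope, intercept)  # approximately 2.0 and 1.0 for these points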
totalgood/pugnlp
src/pugnlp/plots.py
scatmat
def scatmat(df, category=None, colors='rgob', num_plots=4, num_topics=100, num_columns=4,
            show=False, block=False, data_path=DATA_PATH, save=False, verbose=1):
    """Scatter plot with colored markers depending on the discrete values in a "category" column

    FIXME: empty plots that don't go away.
    Plot and/or save a scatter matrix in groups of num_columns topics."""
    if category is None:
        category = list(df.columns)[-1]
    if isinstance(category, (str, bytes, int)) and category in df.columns:
        category = df[category]
    else:
        category = pd.Series(category)
    suffix = '{}x{}'.format(*list(df.shape))
    # suffix = compose_suffix(len(df), num_topics, save)
    # save = bool(save)
    # integer division: range() requires an int in Python 3
    for i in range(min(num_plots * num_columns, num_topics) // num_plots):
        scatter_matrix(df[df.columns[i * num_columns:(i + 1) * num_columns]],
                       marker='+', c=[colors[int(x) % len(colors)] for x in category.values],
                       figsize=(18, 12))
        if save:
            # build the basename without the extension so '.jpg' is appended exactly once below
            name = 'scatmat_topics_{}-{}'.format(i * num_columns, (i + 1) * num_columns) + suffix
            plt.savefig(os.path.join(data_path, name + '.jpg'))
        if show:
            if block:
                plt.show()
            else:
                plt.show(block=False)
python
def scatmat(df, category=None, colors='rgob', num_plots=4, num_topics=100, num_columns=4,
            show=False, block=False, data_path=DATA_PATH, save=False, verbose=1):
    """Scatter plot with colored markers depending on the discrete values in a "category" column

    FIXME: empty plots that don't go away.
    Plot and/or save a scatter matrix in groups of num_columns topics."""
    if category is None:
        category = list(df.columns)[-1]
    if isinstance(category, (str, bytes, int)) and category in df.columns:
        category = df[category]
    else:
        category = pd.Series(category)
    suffix = '{}x{}'.format(*list(df.shape))
    # suffix = compose_suffix(len(df), num_topics, save)
    # save = bool(save)
    # integer division: range() requires an int in Python 3
    for i in range(min(num_plots * num_columns, num_topics) // num_plots):
        scatter_matrix(df[df.columns[i * num_columns:(i + 1) * num_columns]],
                       marker='+', c=[colors[int(x) % len(colors)] for x in category.values],
                       figsize=(18, 12))
        if save:
            # build the basename without the extension so '.jpg' is appended exactly once below
            name = 'scatmat_topics_{}-{}'.format(i * num_columns, (i + 1) * num_columns) + suffix
            plt.savefig(os.path.join(data_path, name + '.jpg'))
        if show:
            if block:
                plt.show()
            else:
                plt.show(block=False)
[ "def", "scatmat", "(", "df", ",", "category", "=", "None", ",", "colors", "=", "'rgob'", ",", "num_plots", "=", "4", ",", "num_topics", "=", "100", ",", "num_columns", "=", "4", ",", "show", "=", "False", ",", "block", "=", "False", ",", "data_path", "=", "DATA_PATH", ",", "save", "=", "False", ",", "verbose", "=", "1", ")", ":", "if", "category", "is", "None", ":", "category", "=", "list", "(", "df", ".", "columns", ")", "[", "-", "1", "]", "if", "isinstance", "(", "category", ",", "(", "str", ",", "bytes", ",", "int", ")", ")", "and", "category", "in", "df", ".", "columns", ":", "category", "=", "df", "[", "category", "]", "else", ":", "category", "=", "pd", ".", "Series", "(", "category", ")", "suffix", "=", "'{}x{}'", ".", "format", "(", "*", "list", "(", "df", ".", "shape", ")", ")", "# suffix = compose_suffix(len(df), num_topics, save)", "# save = bool(save)", "for", "i", "in", "range", "(", "min", "(", "num_plots", "*", "num_columns", ",", "num_topics", ")", "/", "num_plots", ")", ":", "scatter_matrix", "(", "df", "[", "df", ".", "columns", "[", "i", "*", "num_columns", ":", "(", "i", "+", "1", ")", "*", "num_columns", "]", "]", ",", "marker", "=", "'+'", ",", "c", "=", "[", "colors", "[", "int", "(", "x", ")", "%", "len", "(", "colors", ")", "]", "for", "x", "in", "category", ".", "values", "]", ",", "figsize", "=", "(", "18", ",", "12", ")", ")", "if", "save", ":", "name", "=", "'scatmat_topics_{}-{}.jpg'", ".", "format", "(", "i", "*", "num_columns", ",", "(", "i", "+", "1", ")", "*", "num_columns", ")", "+", "suffix", "plt", ".", "savefig", "(", "os", ".", "path", ".", "join", "(", "data_path", ",", "name", "+", "'.jpg'", ")", ")", "if", "show", ":", "if", "block", ":", "plt", ".", "show", "(", ")", "else", ":", "plt", ".", "show", "(", "block", "=", "False", ")" ]
Scatter plot with colored markers depending on the discrete values in a "category" column FIXME: empty plots that don't go away. Plot and/or save a scatter matrix in groups of num_columns topics
[ "Scatter", "plot", "with", "colored", "markers", "depending", "on", "the", "discrete", "values", "in", "a", "category", "column" ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/plots.py#L296-L323
train
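A hedged usage sketch for scatmat, assuming matplotlib, pandas, scatter_matrix, and DATA_PATH are bound as in the module; the DataFrame and its 'label' column are invented for illustration:

import numpy as np
import pandas as pd

# fabricate a small topic matrix with a discrete category in the last column
df = pd.DataFrame(np.random.randn(50, 8), columns=['topic{}'.format(i) for i in range(8)])
df['label'] = np.random.randint(0, 3, size=50)
scatmat(df, category='label', num_topics=8, num_columns=4, show=False, save=False)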
totalgood/pugnlp
src/pugnlp/plots.py
point_cloud
def point_cloud(df, columns=[0, 1, 2]):
    """3-D Point cloud for plotting things like mesh models of horses ;)"""
    df = df if isinstance(df, pd.DataFrame) else pd.DataFrame(df)
    if not all(c in df.columns for c in columns):
        columns = list(df.columns)[:3]
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    # scatter on the Axes3D instance; the unbound call Axes3D.scatter(...) would misread the first column as `self`
    ax.scatter(*[df[columns[i]] for i in range(3)], zdir='z', s=20, c=None, depthshade=True)
    return ax
python
def point_cloud(df, columns=[0, 1, 2]):
    """3-D Point cloud for plotting things like mesh models of horses ;)"""
    df = df if isinstance(df, pd.DataFrame) else pd.DataFrame(df)
    if not all(c in df.columns for c in columns):
        columns = list(df.columns)[:3]
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    # scatter on the Axes3D instance; the unbound call Axes3D.scatter(...) would misread the first column as `self`
    ax.scatter(*[df[columns[i]] for i in range(3)], zdir='z', s=20, c=None, depthshade=True)
    return ax
[ "def", "point_cloud", "(", "df", ",", "columns", "=", "[", "0", ",", "1", ",", "2", "]", ")", ":", "df", "=", "df", "if", "isinstance", "(", "df", ",", "pd", ".", "DataFrame", ")", "else", "pd", ".", "DataFrame", "(", "df", ")", "if", "not", "all", "(", "c", "in", "df", ".", "columns", "for", "c", "in", "columns", ")", ":", "columns", "=", "list", "(", "df", ".", "columns", ")", "[", ":", "3", "]", "fig", "=", "plt", ".", "figure", "(", ")", "ax", "=", "fig", ".", "add_subplot", "(", "111", ",", "projection", "=", "'3d'", ")", "# noqa", "Axes3D", ".", "scatter", "(", "*", "[", "df", "[", "columns", "[", "i", "]", "]", "for", "i", "in", "range", "(", "3", ")", "]", ",", "zdir", "=", "'z'", ",", "s", "=", "20", ",", "c", "=", "None", ",", "depthshade", "=", "True", ")", "return", "ax" ]
3-D Point cloud for plotting things like mesh models of horses ;)
[ "3", "-", "D", "Point", "cloud", "for", "plotting", "things", "like", "mesh", "models", "of", "horses", ";", ")" ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/plots.py#L326-L335
train
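A quick usage sketch for point_cloud, assuming a matplotlib backend with 3-D support; the random coordinates stand in for a real mesh:

import numpy as np
import pandas as pd

# three coordinate columns; point_cloud falls back to the first three columns regardless
xyz = pd.DataFrame(np.random.rand(200, 3), columns=[0, 1, 2])
ax = point_cloud(xyz)
ax.set_xlabel('x')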
totalgood/pugnlp
src/pugnlp/plots.py
ColorMap.show
def show(self, block=False): """ Display the last image drawn """ try: plt.show(block=block) except ValueError: plt.show()
python
def show(self, block=False): """ Display the last image drawn """ try: plt.show(block=block) except ValueError: plt.show()
[ "def", "show", "(", "self", ",", "block", "=", "False", ")", ":", "try", ":", "plt", ".", "show", "(", "block", "=", "block", ")", "except", "ValueError", ":", "plt", ".", "show", "(", ")" ]
Display the last image drawn
[ "Display", "the", "last", "image", "drawn" ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/plots.py#L284-L289
train
totalgood/pugnlp
src/pugnlp/plots.py
ColorMap.save
def save(self, filename):
    """ save colormap to file"""
    # save via the owning figure; plt.savefig() has no `fig` keyword and targets the current figure instead
    self.fig.savefig(filename, facecolor='black', edgecolor='black')
python
def save(self, filename):
    """ save colormap to file"""
    # save via the owning figure; plt.savefig() has no `fig` keyword and targets the current figure instead
    self.fig.savefig(filename, facecolor='black', edgecolor='black')
[ "def", "save", "(", "self", ",", "filename", ")", ":", "plt", ".", "savefig", "(", "filename", ",", "fig", "=", "self", ".", "fig", ",", "facecolor", "=", "'black'", ",", "edgecolor", "=", "'black'", ")" ]
save colormap to file
[ "save", "colormap", "to", "file" ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/plots.py#L291-L293
train
DarkEnergySurvey/ugali
ugali/analysis/model.py
Model.getp
def getp(self, name): """ Get the named parameter. Parameters ---------- name : string The parameter name. Returns ------- param : The parameter object. """ name = self._mapping.get(name,name) return self.params[name]
python
def getp(self, name): """ Get the named parameter. Parameters ---------- name : string The parameter name. Returns ------- param : The parameter object. """ name = self._mapping.get(name,name) return self.params[name]
[ "def", "getp", "(", "self", ",", "name", ")", ":", "name", "=", "self", ".", "_mapping", ".", "get", "(", "name", ",", "name", ")", "return", "self", ".", "params", "[", "name", "]" ]
Get the named parameter. Parameters ---------- name : string The parameter name. Returns ------- param : The parameter object.
[ "Get", "the", "named", "parameter", "." ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/model.py#L98-L113
train
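The getp pattern — resolve an alias through a mapping, then index the params dict — is easy to illustrate standalone; the aliases and values below are invented:

# hypothetical mapping of friendly aliases onto canonical parameter names
_mapping = {'ra': 'lon', 'dec': 'lat'}
params = {'lon': 54.1, 'lat': -54.1}

def getp(name):
    name = _mapping.get(name, name)  # fall back to the name itself when unaliased
    return params[name]

print(getp('ra'))   # 54.1, found via the alias
print(getp('lat'))  # -54.1, direct lookup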
DarkEnergySurvey/ugali
ugali/analysis/color_lut.py
readColorLUT
def readColorLUT(infile, distance_modulus, mag_1, mag_2, mag_err_1, mag_err_2): """ Take in a color look-up table and return the signal color evaluated for each object. Consider making the argument a Catalog object rather than magnitudes and uncertainties. """ reader = pyfits.open(infile) distance_modulus_array = reader['DISTANCE_MODULUS'].data.field('DISTANCE_MODULUS') if not numpy.any(numpy.fabs(distance_modulus_array - distance_modulus) < 1.e-3): logger.warning("Distance modulus %.2f not available in file %s"%(distance_modulus, infile)) logger.warning(' available distance moduli:'+str(distance_modulus_array)) return False distance_modulus_key = '%.2f'%(distance_modulus_array[numpy.argmin(numpy.fabs(distance_modulus_array - distance_modulus))]) bins_mag_err = reader['BINS_MAG_ERR'].data.field('BINS_MAG_ERR') bins_mag_1 = reader['BINS_MAG_1'].data.field('BINS_MAG_1') bins_mag_2 = reader['BINS_MAG_2'].data.field('BINS_MAG_2') # Note that magnitude uncertainty is always assigned by rounding up, is this the right thing to do? index_mag_err_1 = numpy.clip(numpy.digitize(mag_err_1, bins_mag_err) - 1, 0, len(bins_mag_err) - 2) index_mag_err_2 = numpy.clip(numpy.digitize(mag_err_2, bins_mag_err) - 1, 0, len(bins_mag_err) - 2) u_color = numpy.zeros(len(mag_1)) for index_mag_err_1_select in range(0, len(bins_mag_err) - 1): for index_mag_err_2_select in range(0, len(bins_mag_err) - 1): cut = numpy.logical_and(index_mag_err_1 == index_mag_err_1_select, index_mag_err_2 == index_mag_err_2_select) if numpy.sum(cut) < 1: continue histo = reader[distance_modulus_key].data.field('%i%i'%(index_mag_err_1_select, index_mag_err_2_select)) u_color[cut] = ugali.utils.binning.take2D(histo, mag_2[cut], mag_1[cut], bins_mag_2, bins_mag_1) reader.close() return u_color
python
def readColorLUT(infile, distance_modulus, mag_1, mag_2, mag_err_1, mag_err_2): """ Take in a color look-up table and return the signal color evaluated for each object. Consider making the argument a Catalog object rather than magnitudes and uncertainties. """ reader = pyfits.open(infile) distance_modulus_array = reader['DISTANCE_MODULUS'].data.field('DISTANCE_MODULUS') if not numpy.any(numpy.fabs(distance_modulus_array - distance_modulus) < 1.e-3): logger.warning("Distance modulus %.2f not available in file %s"%(distance_modulus, infile)) logger.warning(' available distance moduli:'+str(distance_modulus_array)) return False distance_modulus_key = '%.2f'%(distance_modulus_array[numpy.argmin(numpy.fabs(distance_modulus_array - distance_modulus))]) bins_mag_err = reader['BINS_MAG_ERR'].data.field('BINS_MAG_ERR') bins_mag_1 = reader['BINS_MAG_1'].data.field('BINS_MAG_1') bins_mag_2 = reader['BINS_MAG_2'].data.field('BINS_MAG_2') # Note that magnitude uncertainty is always assigned by rounding up, is this the right thing to do? index_mag_err_1 = numpy.clip(numpy.digitize(mag_err_1, bins_mag_err) - 1, 0, len(bins_mag_err) - 2) index_mag_err_2 = numpy.clip(numpy.digitize(mag_err_2, bins_mag_err) - 1, 0, len(bins_mag_err) - 2) u_color = numpy.zeros(len(mag_1)) for index_mag_err_1_select in range(0, len(bins_mag_err) - 1): for index_mag_err_2_select in range(0, len(bins_mag_err) - 1): cut = numpy.logical_and(index_mag_err_1 == index_mag_err_1_select, index_mag_err_2 == index_mag_err_2_select) if numpy.sum(cut) < 1: continue histo = reader[distance_modulus_key].data.field('%i%i'%(index_mag_err_1_select, index_mag_err_2_select)) u_color[cut] = ugali.utils.binning.take2D(histo, mag_2[cut], mag_1[cut], bins_mag_2, bins_mag_1) reader.close() return u_color
[ "def", "readColorLUT", "(", "infile", ",", "distance_modulus", ",", "mag_1", ",", "mag_2", ",", "mag_err_1", ",", "mag_err_2", ")", ":", "reader", "=", "pyfits", ".", "open", "(", "infile", ")", "distance_modulus_array", "=", "reader", "[", "'DISTANCE_MODULUS'", "]", ".", "data", ".", "field", "(", "'DISTANCE_MODULUS'", ")", "if", "not", "numpy", ".", "any", "(", "numpy", ".", "fabs", "(", "distance_modulus_array", "-", "distance_modulus", ")", "<", "1.e-3", ")", ":", "logger", ".", "warning", "(", "\"Distance modulus %.2f not available in file %s\"", "%", "(", "distance_modulus", ",", "infile", ")", ")", "logger", ".", "warning", "(", "' available distance moduli:'", "+", "str", "(", "distance_modulus_array", ")", ")", "return", "False", "distance_modulus_key", "=", "'%.2f'", "%", "(", "distance_modulus_array", "[", "numpy", ".", "argmin", "(", "numpy", ".", "fabs", "(", "distance_modulus_array", "-", "distance_modulus", ")", ")", "]", ")", "bins_mag_err", "=", "reader", "[", "'BINS_MAG_ERR'", "]", ".", "data", ".", "field", "(", "'BINS_MAG_ERR'", ")", "bins_mag_1", "=", "reader", "[", "'BINS_MAG_1'", "]", ".", "data", ".", "field", "(", "'BINS_MAG_1'", ")", "bins_mag_2", "=", "reader", "[", "'BINS_MAG_2'", "]", ".", "data", ".", "field", "(", "'BINS_MAG_2'", ")", "# Note that magnitude uncertainty is always assigned by rounding up, is this the right thing to do?", "index_mag_err_1", "=", "numpy", ".", "clip", "(", "numpy", ".", "digitize", "(", "mag_err_1", ",", "bins_mag_err", ")", "-", "1", ",", "0", ",", "len", "(", "bins_mag_err", ")", "-", "2", ")", "index_mag_err_2", "=", "numpy", ".", "clip", "(", "numpy", ".", "digitize", "(", "mag_err_2", ",", "bins_mag_err", ")", "-", "1", ",", "0", ",", "len", "(", "bins_mag_err", ")", "-", "2", ")", "u_color", "=", "numpy", ".", "zeros", "(", "len", "(", "mag_1", ")", ")", "for", "index_mag_err_1_select", "in", "range", "(", "0", ",", "len", "(", "bins_mag_err", ")", "-", "1", ")", ":", "for", "index_mag_err_2_select", "in", "range", "(", "0", ",", "len", "(", "bins_mag_err", ")", "-", "1", ")", ":", "cut", "=", "numpy", ".", "logical_and", "(", "index_mag_err_1", "==", "index_mag_err_1_select", ",", "index_mag_err_2", "==", "index_mag_err_2_select", ")", "if", "numpy", ".", "sum", "(", "cut", ")", "<", "1", ":", "continue", "histo", "=", "reader", "[", "distance_modulus_key", "]", ".", "data", ".", "field", "(", "'%i%i'", "%", "(", "index_mag_err_1_select", ",", "index_mag_err_2_select", ")", ")", "u_color", "[", "cut", "]", "=", "ugali", ".", "utils", ".", "binning", ".", "take2D", "(", "histo", ",", "mag_2", "[", "cut", "]", ",", "mag_1", "[", "cut", "]", ",", "bins_mag_2", ",", "bins_mag_1", ")", "reader", ".", "close", "(", ")", "return", "u_color" ]
Take in a color look-up table and return the signal color evaluated for each object. Consider making the argument a Catalog object rather than magnitudes and uncertainties.
[ "Take", "in", "a", "color", "look", "-", "up", "table", "and", "return", "the", "signal", "color", "evaluated", "for", "each", "object", ".", "Consider", "making", "the", "argument", "a", "Catalog", "object", "rather", "than", "magnitudes", "and", "uncertainties", "." ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/color_lut.py#L335-L374
train
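The uncertainty-binning trick inside readColorLUT — np.digitize shifted down by one and clipped so every value lands on a valid bin index — can be checked without any FITS look-up table:

import numpy as np

bins_mag_err = np.array([0.0, 0.05, 0.1, 0.2, 0.5])
mag_err = np.array([0.01, 0.07, 0.2, 0.9])  # the last value lies beyond the final bin edge
# digitize returns 1-based bin numbers; subtract 1 and clip so out-of-range values reuse the last bin
index = np.clip(np.digitize(mag_err, bins_mag_err) - 1, 0, len(bins_mag_err) - 2)
print(index)  # [0 1 3 3]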
consbio/ncdjango
ncdjango/utils.py
auto_memoize
def auto_memoize(func):
    """
    Based on django.utils.functional.memoize. Automatically memoizes instance methods for the lifespan of an object.
    Only works with methods taking non-keyword arguments. Note that the args to the function must be usable as
    dictionary keys. Also, the first argument MUST be self. This decorator will not work for functions or class
    methods, only object methods.
    """
    @wraps(func)
    def wrapper(*args):
        inst = args[0]
        inst._memoized_values = getattr(inst, '_memoized_values', {})
        key = (func, args[1:])
        if key not in inst._memoized_values:
            inst._memoized_values[key] = func(*args)

        return inst._memoized_values[key]
    return wrapper
python
def auto_memoize(func):
    """
    Based on django.utils.functional.memoize. Automatically memoizes instance methods for the lifespan of an object.
    Only works with methods taking non-keyword arguments. Note that the args to the function must be usable as
    dictionary keys. Also, the first argument MUST be self. This decorator will not work for functions or class
    methods, only object methods.
    """
    @wraps(func)
    def wrapper(*args):
        inst = args[0]
        inst._memoized_values = getattr(inst, '_memoized_values', {})
        key = (func, args[1:])
        if key not in inst._memoized_values:
            inst._memoized_values[key] = func(*args)

        return inst._memoized_values[key]
    return wrapper
[ "def", "auto_memoize", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ")", ":", "inst", "=", "args", "[", "0", "]", "inst", ".", "_memoized_values", "=", "getattr", "(", "inst", ",", "'_memoized_values'", ",", "{", "}", ")", "key", "=", "(", "func", ",", "args", "[", "1", ":", "]", ")", "if", "key", "not", "in", "inst", ".", "_memoized_values", ":", "inst", ".", "_memoized_values", "[", "key", "]", "=", "func", "(", "*", "args", ")", "return", "inst", ".", "_memoized_values", "[", "key", "]", "return", "wrapper" ]
Based on django.utils.functional.memoize. Automatically memoizes instance methods for the lifespan of an object. Only works with methods taking non-keyword arguments. Note that the args to the function must be usable as dictionary keys. Also, the first argument MUST be self. This decorator will not work for functions or class methods, only object methods.
[ "Based", "on", "django", ".", "utils", ".", "functional", ".", "memoize", ".", "Automatically", "memoizes", "instance", "methods", "for", "the", "lifespan", "of", "an", "object", ".", "Only", "works", "with", "methods", "taking", "non", "-", "keyword", "arguments", ".", "Note", "that", "the", "args", "to", "the", "function", "must", "be", "usable", "as", "dictionary", "keys", ".", "Also", "the", "first", "argument", "MUST", "be", "self", ".", "This", "decorator", "will", "not", "work", "for", "functions", "or", "class", "methods", "only", "object", "methods", "." ]
f807bfd1e4083ab29fbc3c4d4418be108383a710
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/utils.py#L15-L31
train
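A self-contained check of auto_memoize (assuming the decorator above is in scope); the Squares class is invented for the demonstration:

class Squares:
    calls = 0

    @auto_memoize
    def square(self, n):
        Squares.calls += 1
        return n * n

s = Squares()
print(s.square(4), s.square(4))  # 16 16
print(Squares.calls)             # 1: the second call hit the per-instance cache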
consbio/ncdjango
ncdjango/utils.py
best_fit
def best_fit(li, value):
    """For a sorted list li, returns the index of the item closest to value"""
    # bisect_left can return len(li) when value exceeds every item, so clamp to the last index
    index = min(bisect_left(li, value), len(li) - 1)
    if index == 0:
        return index

    # return whichever neighboring index holds the value nearer to `value`
    if li[index] - value < value - li[index - 1]:
        return index
    else:
        return index - 1
python
def best_fit(li, value):
    """For a sorted list li, returns the index of the item closest to value"""
    # bisect_left can return len(li) when value exceeds every item, so clamp to the last index
    index = min(bisect_left(li, value), len(li) - 1)
    if index == 0:
        return index

    # return whichever neighboring index holds the value nearer to `value`
    if li[index] - value < value - li[index - 1]:
        return index
    else:
        return index - 1
[ "def", "best_fit", "(", "li", ",", "value", ")", ":", "index", "=", "min", "(", "bisect_left", "(", "li", ",", "value", ")", ",", "len", "(", "li", ")", "-", "1", ")", "if", "index", "in", "(", "0", ",", "len", "(", "li", ")", ")", ":", "return", "index", "if", "li", "[", "index", "]", "-", "value", "<", "value", "-", "li", "[", "index", "-", "1", "]", ":", "return", "index", "else", ":", "return", "index", "-", "1" ]
For a sorted list li, returns the index of the item closest to value
[ "For", "a", "sorted", "list", "li", "returns", "the", "index", "of", "the", "item", "closest", "to", "value" ]
f807bfd1e4083ab29fbc3c4d4418be108383a710
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/utils.py#L34-L45
train
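A tiny check of best_fit, with bisect_left imported as the module does; note that it returns an index, not the item itself:

from bisect import bisect_left

li = [1, 4, 9, 16, 25]
print(best_fit(li, 10))  # 2, since li[2] == 9 is nearer to 10 than 16
print(best_fit(li, 99))  # 4, clamped to the last index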
consbio/ncdjango
ncdjango/utils.py
proj4_to_epsg
def proj4_to_epsg(projection):
    """Attempts to convert a PROJ4 projection object to an EPSG code and returns None if conversion fails"""
    def make_definition(value):
        return {x.strip().lower() for x in value.split('+') if x}

    # Use the EPSG in the definition if available
    match = EPSG_RE.search(projection.srs)
    if match:
        return int(match.group(1))

    # Otherwise, try to look up the EPSG from the pyproj data file
    pyproj_data_dir = os.path.join(os.path.dirname(pyproj.__file__), 'data')
    pyproj_epsg_file = os.path.join(pyproj_data_dir, 'epsg')
    if os.path.exists(pyproj_epsg_file):
        definition = make_definition(projection.srs)
        # a context manager closes the file even when a match is found and returned early
        with open(pyproj_epsg_file, 'r') as f:
            for line in f:
                match = PYPROJ_EPSG_FILE_RE.search(line)
                if match:
                    file_definition = make_definition(match.group(2))
                    if definition == file_definition:
                        return int(match.group(1))

    return None
python
def proj4_to_epsg(projection):
    """Attempts to convert a PROJ4 projection object to an EPSG code and returns None if conversion fails"""
    def make_definition(value):
        return {x.strip().lower() for x in value.split('+') if x}

    # Use the EPSG in the definition if available
    match = EPSG_RE.search(projection.srs)
    if match:
        return int(match.group(1))

    # Otherwise, try to look up the EPSG from the pyproj data file
    pyproj_data_dir = os.path.join(os.path.dirname(pyproj.__file__), 'data')
    pyproj_epsg_file = os.path.join(pyproj_data_dir, 'epsg')
    if os.path.exists(pyproj_epsg_file):
        definition = make_definition(projection.srs)
        # a context manager closes the file even when a match is found and returned early
        with open(pyproj_epsg_file, 'r') as f:
            for line in f:
                match = PYPROJ_EPSG_FILE_RE.search(line)
                if match:
                    file_definition = make_definition(match.group(2))
                    if definition == file_definition:
                        return int(match.group(1))

    return None
[ "def", "proj4_to_epsg", "(", "projection", ")", ":", "def", "make_definition", "(", "value", ")", ":", "return", "{", "x", ".", "strip", "(", ")", ".", "lower", "(", ")", "for", "x", "in", "value", ".", "split", "(", "'+'", ")", "if", "x", "}", "# Use the EPSG in the definition if available", "match", "=", "EPSG_RE", ".", "search", "(", "projection", ".", "srs", ")", "if", "match", ":", "return", "int", "(", "match", ".", "group", "(", "1", ")", ")", "# Otherwise, try to look up the EPSG from the pyproj data file", "pyproj_data_dir", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "pyproj", ".", "__file__", ")", ",", "'data'", ")", "pyproj_epsg_file", "=", "os", ".", "path", ".", "join", "(", "pyproj_data_dir", ",", "'epsg'", ")", "if", "os", ".", "path", ".", "exists", "(", "pyproj_epsg_file", ")", ":", "definition", "=", "make_definition", "(", "projection", ".", "srs", ")", "f", "=", "open", "(", "pyproj_epsg_file", ",", "'r'", ")", "for", "line", "in", "f", ".", "readlines", "(", ")", ":", "match", "=", "PYPROJ_EPSG_FILE_RE", ".", "search", "(", "line", ")", "if", "match", ":", "file_definition", "=", "make_definition", "(", "match", ".", "group", "(", "2", ")", ")", "if", "definition", "==", "file_definition", ":", "return", "int", "(", "match", ".", "group", "(", "1", ")", ")", "return", "None" ]
Attempts to convert a PROJ4 projection object to an EPSG code and returns None if conversion fails
[ "Attempts", "to", "convert", "a", "PROJ4", "projection", "object", "to", "an", "EPSG", "code", "and", "returns", "None", "if", "conversion", "fails" ]
f807bfd1e4083ab29fbc3c4d4418be108383a710
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/utils.py#L48-L71
train
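The fast path of proj4_to_epsg simply regex-matches an epsg:NNNN clause out of the srs string; a standalone sketch with a hypothetical pattern standing in for the module-level EPSG_RE constant:

import re

# hypothetical stand-in for the module's EPSG_RE
EPSG_RE = re.compile(r'epsg:(\d+)', re.IGNORECASE)

srs = '+init=EPSG:4326 +no_defs'
match = EPSG_RE.search(srs)
print(int(match.group(1)) if match else None)  # 4326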
consbio/ncdjango
ncdjango/utils.py
wkt_to_proj4
def wkt_to_proj4(wkt): """Converts a well-known text string to a pyproj.Proj object""" srs = osgeo.osr.SpatialReference() srs.ImportFromWkt(wkt) return pyproj.Proj(str(srs.ExportToProj4()))
python
def wkt_to_proj4(wkt): """Converts a well-known text string to a pyproj.Proj object""" srs = osgeo.osr.SpatialReference() srs.ImportFromWkt(wkt) return pyproj.Proj(str(srs.ExportToProj4()))
[ "def", "wkt_to_proj4", "(", "wkt", ")", ":", "srs", "=", "osgeo", ".", "osr", ".", "SpatialReference", "(", ")", "srs", ".", "ImportFromWkt", "(", "wkt", ")", "return", "pyproj", ".", "Proj", "(", "str", "(", "srs", ".", "ExportToProj4", "(", ")", ")", ")" ]
Converts a well-known text string to a pyproj.Proj object
[ "Converts", "a", "well", "-", "known", "text", "string", "to", "a", "pyproj", ".", "Proj", "object" ]
f807bfd1e4083ab29fbc3c4d4418be108383a710
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/utils.py#L74-L80
train
consbio/ncdjango
ncdjango/utils.py
proj4_to_wkt
def proj4_to_wkt(projection): """Converts a pyproj.Proj object to a well-known text string""" srs = osgeo.osr.SpatialReference() srs.ImportFromProj4(projection.srs) return srs.ExportToWkt()
python
def proj4_to_wkt(projection): """Converts a pyproj.Proj object to a well-known text string""" srs = osgeo.osr.SpatialReference() srs.ImportFromProj4(projection.srs) return srs.ExportToWkt()
[ "def", "proj4_to_wkt", "(", "projection", ")", ":", "srs", "=", "osgeo", ".", "osr", ".", "SpatialReference", "(", ")", "srs", ".", "ImportFromProj4", "(", "projection", ".", "srs", ")", "return", "srs", ".", "ExportToWkt", "(", ")" ]
Converts a pyproj.Proj object to a well-known text string
[ "Converts", "a", "pyproj", ".", "Proj", "object", "to", "a", "well", "-", "known", "text", "string" ]
f807bfd1e4083ab29fbc3c4d4418be108383a710
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/utils.py#L83-L89
train
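wkt_to_proj4 and proj4_to_wkt are near-inverses built on GDAL's osr bindings; a round-trip sketch, assuming both osgeo and pyproj are installed and the two functions above are in scope:

import osgeo.osr

srs = osgeo.osr.SpatialReference()
srs.ImportFromEPSG(4326)  # WGS84
wkt = srs.ExportToWkt()

proj = wkt_to_proj4(wkt)        # pyproj.Proj built from the WKT
print(proj4_to_wkt(proj)[:24])  # back to a WKT string, e.g. 'GEOGCS["WGS 84",DATUM[...'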
consbio/ncdjango
ncdjango/utils.py
project_geometry
def project_geometry(geometry, source, target): """Projects a shapely geometry object from the source to the target projection.""" project = partial( pyproj.transform, source, target ) return transform(project, geometry)
python
def project_geometry(geometry, source, target): """Projects a shapely geometry object from the source to the target projection.""" project = partial( pyproj.transform, source, target ) return transform(project, geometry)
[ "def", "project_geometry", "(", "geometry", ",", "source", ",", "target", ")", ":", "project", "=", "partial", "(", "pyproj", ".", "transform", ",", "source", ",", "target", ")", "return", "transform", "(", "project", ",", "geometry", ")" ]
Projects a shapely geometry object from the source to the target projection.
[ "Projects", "a", "shapely", "geometry", "object", "from", "the", "source", "to", "the", "target", "projection", "." ]
f807bfd1e4083ab29fbc3c4d4418be108383a710
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/utils.py#L92-L101
train
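A usage sketch for project_geometry, assuming shapely and the pyproj 1.x-style pyproj.transform API that the function itself relies on:

import pyproj
from shapely.geometry import Point

wgs84 = pyproj.Proj(init='epsg:4326')         # lon/lat degrees
web_mercator = pyproj.Proj(init='epsg:3857')  # meters

pt = Point(-122.4194, 37.7749)  # San Francisco
print(project_geometry(pt, wgs84, web_mercator))  # roughly POINT (-13627665 4547675)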
DarkEnergySurvey/ugali
ugali/utils/config.py
Config._load
def _load(self, config):
    """
    Load this config from an existing config

    Parameters:
    -----------
    config : filename, config object, or dict to load

    Returns:
    --------
    params : configuration parameters
    """
    if isstring(config):
        self.filename = config
        # safe_load avoids constructing arbitrary Python objects; the context manager closes the file
        with open(config) as f:
            params = yaml.safe_load(f)
    elif isinstance(config, Config):
        # This is the copy constructor...
        self.filename = config.filename
        params = copy.deepcopy(config)
    elif isinstance(config, dict):
        params = copy.deepcopy(config)
    elif config is None:
        params = {}
    else:
        raise Exception('Unrecognized input')

    return params
python
def _load(self, config):
    """
    Load this config from an existing config

    Parameters:
    -----------
    config : filename, config object, or dict to load

    Returns:
    --------
    params : configuration parameters
    """
    if isstring(config):
        self.filename = config
        # safe_load avoids constructing arbitrary Python objects; the context manager closes the file
        with open(config) as f:
            params = yaml.safe_load(f)
    elif isinstance(config, Config):
        # This is the copy constructor...
        self.filename = config.filename
        params = copy.deepcopy(config)
    elif isinstance(config, dict):
        params = copy.deepcopy(config)
    elif config is None:
        params = {}
    else:
        raise Exception('Unrecognized input')

    return params
[ "def", "_load", "(", "self", ",", "config", ")", ":", "if", "isstring", "(", "config", ")", ":", "self", ".", "filename", "=", "config", "params", "=", "yaml", ".", "load", "(", "open", "(", "config", ")", ")", "elif", "isinstance", "(", "config", ",", "Config", ")", ":", "# This is the copy constructor...", "self", ".", "filename", "=", "config", ".", "filename", "params", "=", "copy", ".", "deepcopy", "(", "config", ")", "elif", "isinstance", "(", "config", ",", "dict", ")", ":", "params", "=", "copy", ".", "deepcopy", "(", "config", ")", "elif", "config", "is", "None", ":", "params", "=", "{", "}", "else", ":", "raise", "Exception", "(", "'Unrecognized input'", ")", "return", "params" ]
Load this config from an existing config Parameters: ----------- config : filename, config object, or dict to load Returns: -------- params : configuration parameters
[ "Load", "this", "config", "from", "an", "existing", "config" ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/config.py#L64-L89
train
DarkEnergySurvey/ugali
ugali/utils/config.py
Config._validate
def _validate(self): """ Enforce some structure to the config file """ # This could be done with a default config # Check that specific keys exist sections = odict([ ('catalog',['dirname','basename', 'lon_field','lat_field','objid_field', 'mag_1_band', 'mag_1_field', 'mag_err_1_field', 'mag_2_band', 'mag_2_field', 'mag_err_2_field', ]), ('mask',[]), ('coords',['nside_catalog','nside_mask','nside_likelihood', 'nside_pixel','roi_radius','roi_radius_annulus', 'roi_radius_interior','coordsys', ]), ('likelihood',[]), ('output',[]), ('batch',[]), ]) keys = np.array(list(sections.keys())) found = np.in1d(keys,list(self.keys())) if not np.all(found): msg = 'Missing sections: '+str(keys[~found]) raise Exception(msg) for section,keys in sections.items(): keys = np.array(keys) found = np.in1d(keys,list(self[section].keys())) if not np.all(found): msg = 'Missing keys in %s: '%(section)+str(keys[~found]) raise Exception(msg)
python
def _validate(self): """ Enforce some structure to the config file """ # This could be done with a default config # Check that specific keys exist sections = odict([ ('catalog',['dirname','basename', 'lon_field','lat_field','objid_field', 'mag_1_band', 'mag_1_field', 'mag_err_1_field', 'mag_2_band', 'mag_2_field', 'mag_err_2_field', ]), ('mask',[]), ('coords',['nside_catalog','nside_mask','nside_likelihood', 'nside_pixel','roi_radius','roi_radius_annulus', 'roi_radius_interior','coordsys', ]), ('likelihood',[]), ('output',[]), ('batch',[]), ]) keys = np.array(list(sections.keys())) found = np.in1d(keys,list(self.keys())) if not np.all(found): msg = 'Missing sections: '+str(keys[~found]) raise Exception(msg) for section,keys in sections.items(): keys = np.array(keys) found = np.in1d(keys,list(self[section].keys())) if not np.all(found): msg = 'Missing keys in %s: '%(section)+str(keys[~found]) raise Exception(msg)
[ "def", "_validate", "(", "self", ")", ":", "# This could be done with a default config", "# Check that specific keys exist", "sections", "=", "odict", "(", "[", "(", "'catalog'", ",", "[", "'dirname'", ",", "'basename'", ",", "'lon_field'", ",", "'lat_field'", ",", "'objid_field'", ",", "'mag_1_band'", ",", "'mag_1_field'", ",", "'mag_err_1_field'", ",", "'mag_2_band'", ",", "'mag_2_field'", ",", "'mag_err_2_field'", ",", "]", ")", ",", "(", "'mask'", ",", "[", "]", ")", ",", "(", "'coords'", ",", "[", "'nside_catalog'", ",", "'nside_mask'", ",", "'nside_likelihood'", ",", "'nside_pixel'", ",", "'roi_radius'", ",", "'roi_radius_annulus'", ",", "'roi_radius_interior'", ",", "'coordsys'", ",", "]", ")", ",", "(", "'likelihood'", ",", "[", "]", ")", ",", "(", "'output'", ",", "[", "]", ")", ",", "(", "'batch'", ",", "[", "]", ")", ",", "]", ")", "keys", "=", "np", ".", "array", "(", "list", "(", "sections", ".", "keys", "(", ")", ")", ")", "found", "=", "np", ".", "in1d", "(", "keys", ",", "list", "(", "self", ".", "keys", "(", ")", ")", ")", "if", "not", "np", ".", "all", "(", "found", ")", ":", "msg", "=", "'Missing sections: '", "+", "str", "(", "keys", "[", "~", "found", "]", ")", "raise", "Exception", "(", "msg", ")", "for", "section", ",", "keys", "in", "sections", ".", "items", "(", ")", ":", "keys", "=", "np", ".", "array", "(", "keys", ")", "found", "=", "np", ".", "in1d", "(", "keys", ",", "list", "(", "self", "[", "section", "]", ".", "keys", "(", ")", ")", ")", "if", "not", "np", ".", "all", "(", "found", ")", ":", "msg", "=", "'Missing keys in %s: '", "%", "(", "section", ")", "+", "str", "(", "keys", "[", "~", "found", "]", ")", "raise", "Exception", "(", "msg", ")" ]
Enforce some structure to the config file
[ "Enforce", "some", "structure", "to", "the", "config", "file" ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/config.py#L91-L124
train
DarkEnergySurvey/ugali
ugali/utils/config.py
Config._formatFilepaths
def _formatFilepaths(self): """ Join dirnames and filenames from config. """ likedir=self['output']['likedir'] self.likefile = join(likedir,self['output']['likefile']) self.mergefile = join(likedir,self['output']['mergefile']) self.roifile = join(likedir,self['output']['roifile']) searchdir=self['output']['searchdir'] self.labelfile = join(searchdir,self['output']['labelfile']) self.objectfile = join(searchdir,self['output']['objectfile']) self.assocfile = join(searchdir,self['output']['assocfile']) self.candfile = join(searchdir,self['output']['candfile']) mcmcdir=self['output']['mcmcdir'] self.mcmcfile = join(mcmcdir,self['output']['mcmcfile'])
python
def _formatFilepaths(self): """ Join dirnames and filenames from config. """ likedir=self['output']['likedir'] self.likefile = join(likedir,self['output']['likefile']) self.mergefile = join(likedir,self['output']['mergefile']) self.roifile = join(likedir,self['output']['roifile']) searchdir=self['output']['searchdir'] self.labelfile = join(searchdir,self['output']['labelfile']) self.objectfile = join(searchdir,self['output']['objectfile']) self.assocfile = join(searchdir,self['output']['assocfile']) self.candfile = join(searchdir,self['output']['candfile']) mcmcdir=self['output']['mcmcdir'] self.mcmcfile = join(mcmcdir,self['output']['mcmcfile'])
[ "def", "_formatFilepaths", "(", "self", ")", ":", "likedir", "=", "self", "[", "'output'", "]", "[", "'likedir'", "]", "self", ".", "likefile", "=", "join", "(", "likedir", ",", "self", "[", "'output'", "]", "[", "'likefile'", "]", ")", "self", ".", "mergefile", "=", "join", "(", "likedir", ",", "self", "[", "'output'", "]", "[", "'mergefile'", "]", ")", "self", ".", "roifile", "=", "join", "(", "likedir", ",", "self", "[", "'output'", "]", "[", "'roifile'", "]", ")", "searchdir", "=", "self", "[", "'output'", "]", "[", "'searchdir'", "]", "self", ".", "labelfile", "=", "join", "(", "searchdir", ",", "self", "[", "'output'", "]", "[", "'labelfile'", "]", ")", "self", ".", "objectfile", "=", "join", "(", "searchdir", ",", "self", "[", "'output'", "]", "[", "'objectfile'", "]", ")", "self", ".", "assocfile", "=", "join", "(", "searchdir", ",", "self", "[", "'output'", "]", "[", "'assocfile'", "]", ")", "self", ".", "candfile", "=", "join", "(", "searchdir", ",", "self", "[", "'output'", "]", "[", "'candfile'", "]", ")", "mcmcdir", "=", "self", "[", "'output'", "]", "[", "'mcmcdir'", "]", "self", ".", "mcmcfile", "=", "join", "(", "mcmcdir", ",", "self", "[", "'output'", "]", "[", "'mcmcfile'", "]", ")" ]
Join dirnames and filenames from config.
[ "Join", "dirnames", "and", "filenames", "from", "config", "." ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/config.py#L129-L145
train
DarkEnergySurvey/ugali
ugali/utils/config.py
Config.write
def write(self, filename):
    """
    Write a copy of this config object.

    Parameters:
    -----------
    filename : output filename (.py or .yaml extension selects the format)

    Returns:
    --------
    None
    """
    ext = os.path.splitext(filename)[1]
    writer = open(filename, 'w')
    if ext == '.py':
        writer.write(pprint.pformat(self))
    elif ext == '.yaml':
        writer.write(yaml.dump(self))
    else:
        writer.close()
        raise Exception('Unrecognized config format: %s'%ext)
    writer.close()
python
def write(self, filename):
    """
    Write a copy of this config object.

    Parameters:
    -----------
    filename : output filename (.py or .yaml extension selects the format)

    Returns:
    --------
    None
    """
    ext = os.path.splitext(filename)[1]
    writer = open(filename, 'w')
    if ext == '.py':
        writer.write(pprint.pformat(self))
    elif ext == '.yaml':
        writer.write(yaml.dump(self))
    else:
        writer.close()
        raise Exception('Unrecognized config format: %s'%ext)
    writer.close()
[ "def", "write", "(", "self", ",", "filename", ")", ":", "ext", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "[", "1", "]", "writer", "=", "open", "(", "filename", ",", "'w'", ")", "if", "ext", "==", "'.py'", ":", "writer", ".", "write", "(", "pprint", ".", "pformat", "(", "self", ")", ")", "elif", "ext", "==", "'.yaml'", ":", "writer", ".", "write", "(", "yaml", ".", "dump", "(", "self", ")", ")", "else", ":", "writer", ".", "close", "(", ")", "raise", "Exception", "(", "'Unrecognized config format: %s'", "%", "ext", ")", "writer", ".", "close", "(", ")" ]
Write a copy of this config object. Parameters: ----------- filename : output filename (.py or .yaml extension selects the format) Returns: -------- None
[ "Write", "a", "copy", "of", "this", "config", "object", "." ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/config.py#L147-L168
train
DarkEnergySurvey/ugali
ugali/utils/config.py
Config.getFilenames
def getFilenames(self,pixels=None):
    """
    Return the requested filenames.

    Parameters:
    -----------
    pixels : requested pixels

    Returns:
    --------
    filenames : recarray
    """
    logger.debug("Getting filenames...")
    if pixels is None:
        return self.filenames
    else:
        return self.filenames[np.in1d(self.filenames['pix'],pixels)]
python
def getFilenames(self,pixels=None):
    """
    Return the requested filenames.

    Parameters:
    -----------
    pixels : requested pixels

    Returns:
    --------
    filenames : recarray
    """
    logger.debug("Getting filenames...")
    if pixels is None:
        return self.filenames
    else:
        return self.filenames[np.in1d(self.filenames['pix'],pixels)]
[ "def", "getFilenames", "(", "self", ",", "pixels", "=", "None", ")", ":", "logger", ".", "debug", "(", "\"Getting filenames...\"", ")", "if", "pixels", "is", "None", ":", "return", "self", ".", "filenames", "else", ":", "return", "self", ".", "filenames", "[", "np", ".", "in1d", "(", "self", ".", "filenames", "[", "'pix'", "]", ",", "pixels", ")", "]" ]
Return the requested filenames. Parameters: ----------- pixels : requested pixels Returns: -------- filenames : recarray
[ "Return", "the", "requested", "filenames", "." ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/config.py#L305-L321
train
DarkEnergySurvey/ugali
ugali/utils/healpix.py
superpixel
def superpixel(subpix, nside_subpix, nside_superpix): """ Return the indices of the super-pixels which contain each of the sub-pixels. """ if nside_subpix==nside_superpix: return subpix theta, phi = hp.pix2ang(nside_subpix, subpix) return hp.ang2pix(nside_superpix, theta, phi)
python
def superpixel(subpix, nside_subpix, nside_superpix): """ Return the indices of the super-pixels which contain each of the sub-pixels. """ if nside_subpix==nside_superpix: return subpix theta, phi = hp.pix2ang(nside_subpix, subpix) return hp.ang2pix(nside_superpix, theta, phi)
[ "def", "superpixel", "(", "subpix", ",", "nside_subpix", ",", "nside_superpix", ")", ":", "if", "nside_subpix", "==", "nside_superpix", ":", "return", "subpix", "theta", ",", "phi", "=", "hp", ".", "pix2ang", "(", "nside_subpix", ",", "subpix", ")", "return", "hp", ".", "ang2pix", "(", "nside_superpix", ",", "theta", ",", "phi", ")" ]
Return the indices of the super-pixels which contain each of the sub-pixels.
[ "Return", "the", "indices", "of", "the", "super", "-", "pixels", "which", "contain", "each", "of", "the", "sub", "-", "pixels", "." ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/healpix.py#L20-L26
train
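superpixel maps each sub-pixel center back through ang2pix at the coarser resolution; a minimal check with healpy (assumed installed and imported as hp, as in the module):

import numpy as np

subpix = np.arange(16)           # some nside=8 pixels
print(superpixel(subpix, 8, 2))  # the nside=2 pixels containing each of them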
DarkEnergySurvey/ugali
ugali/utils/healpix.py
ud_grade_ipix
def ud_grade_ipix(ipix, nside_in, nside_out, nest=False):
    """
    Upgrade or degrade resolution of a pixel list.

    Parameters:
    -----------
    ipix:array-like
        the input pixel(s)
    nside_in:int
        the nside of the input pixel(s)
    nside_out:int
        the desired nside of the output pixel(s)
    nest:bool
        if True, input and output use NESTED pixel ordering (RING otherwise)

    Returns:
    --------
    pix_out:array-like
        the upgraded or degraded pixel array
    """
    if nside_in == nside_out:
        return ipix
    elif nside_in < nside_out:
        return u_grade_ipix(ipix, nside_in, nside_out, nest)
    elif nside_in > nside_out:
        return d_grade_ipix(ipix, nside_in, nside_out, nest)
python
def ud_grade_ipix(ipix, nside_in, nside_out, nest=False):
    """
    Upgrade or degrade resolution of a pixel list.

    Parameters:
    -----------
    ipix:array-like
        the input pixel(s)
    nside_in:int
        the nside of the input pixel(s)
    nside_out:int
        the desired nside of the output pixel(s)
    nest:bool
        if True, input and output use NESTED pixel ordering (RING otherwise)

    Returns:
    --------
    pix_out:array-like
        the upgraded or degraded pixel array
    """
    if nside_in == nside_out:
        return ipix
    elif nside_in < nside_out:
        return u_grade_ipix(ipix, nside_in, nside_out, nest)
    elif nside_in > nside_out:
        return d_grade_ipix(ipix, nside_in, nside_out, nest)
[ "def", "ud_grade_ipix", "(", "ipix", ",", "nside_in", ",", "nside_out", ",", "nest", "=", "False", ")", ":", "if", "nside_in", "==", "nside_out", ":", "return", "ipix", "elif", "nside_in", "<", "nside_out", ":", "return", "u_grade_ipix", "(", "ipix", ",", "nside_in", ",", "nside_out", ",", "nest", ")", "elif", "nside_in", ">", "nside_out", ":", "return", "d_grade_ipix", "(", "ipix", ",", "nside_in", ",", "nside_out", ",", "nest", ")" ]
Upgrade or degrade resolution of a pixel list. Parameters: ----------- ipix:array-like the input pixel(s) nside_in:int the nside of the input pixel(s) nside_out:int the desired nside of the output pixel(s) nest:bool if True, input and output use NESTED pixel ordering (RING otherwise) Returns: -------- pix_out:array-like the upgraded or degraded pixel array
[ "Upgrade", "or", "degrade", "resolution", "of", "a", "pixel", "list", "." ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/healpix.py#L99-L126
train
DarkEnergySurvey/ugali
ugali/utils/healpix.py
index_pix_in_pixels
def index_pix_in_pixels(pix,pixels,sort=False,outside=-1): """ Find the indices of a set of pixels into another set of pixels. !!! ASSUMES SORTED PIXELS !!! Parameters: ----------- pix : set of search pixels pixels : set of reference pixels Returns: -------- index : index into the reference pixels """ # ADW: Not really safe to set index = -1 (accesses last entry); # -np.inf would be better, but breaks other code... # ADW: Are the pixels always sorted? Is there a quick way to check? if sort: pixels = np.sort(pixels) # Assumes that 'pixels' is pre-sorted, otherwise...??? index = np.searchsorted(pixels,pix) if np.isscalar(index): if not np.in1d(pix,pixels).any(): index = outside else: # Find objects that are outside the pixels index[~np.in1d(pix,pixels)] = outside return index
python
def index_pix_in_pixels(pix,pixels,sort=False,outside=-1): """ Find the indices of a set of pixels into another set of pixels. !!! ASSUMES SORTED PIXELS !!! Parameters: ----------- pix : set of search pixels pixels : set of reference pixels Returns: -------- index : index into the reference pixels """ # ADW: Not really safe to set index = -1 (accesses last entry); # -np.inf would be better, but breaks other code... # ADW: Are the pixels always sorted? Is there a quick way to check? if sort: pixels = np.sort(pixels) # Assumes that 'pixels' is pre-sorted, otherwise...??? index = np.searchsorted(pixels,pix) if np.isscalar(index): if not np.in1d(pix,pixels).any(): index = outside else: # Find objects that are outside the pixels index[~np.in1d(pix,pixels)] = outside return index
[ "def", "index_pix_in_pixels", "(", "pix", ",", "pixels", ",", "sort", "=", "False", ",", "outside", "=", "-", "1", ")", ":", "# ADW: Not really safe to set index = -1 (accesses last entry); ", "# -np.inf would be better, but breaks other code...", "# ADW: Are the pixels always sorted? Is there a quick way to check?", "if", "sort", ":", "pixels", "=", "np", ".", "sort", "(", "pixels", ")", "# Assumes that 'pixels' is pre-sorted, otherwise...???", "index", "=", "np", ".", "searchsorted", "(", "pixels", ",", "pix", ")", "if", "np", ".", "isscalar", "(", "index", ")", ":", "if", "not", "np", ".", "in1d", "(", "pix", ",", "pixels", ")", ".", "any", "(", ")", ":", "index", "=", "outside", "else", ":", "# Find objects that are outside the pixels", "index", "[", "~", "np", ".", "in1d", "(", "pix", ",", "pixels", ")", "]", "=", "outside", "return", "index" ]
Find the indices of a set of pixels into another set of pixels. !!! ASSUMES SORTED PIXELS !!! Parameters: ----------- pix : set of search pixels pixels : set of reference pixels Returns: -------- index : index into the reference pixels
[ "Find", "the", "indices", "of", "a", "set", "of", "pixels", "into", "another", "set", "of", "pixels", ".", "!!!", "ASSUMES", "SORTED", "PIXELS", "!!!" ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/healpix.py#L217-L244
train
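A small usage sketch of the searchsorted-based lookup above (the pixel values are illustrative); note that the reference pixels must already be sorted unless sort=True is passed:

import numpy as np

pixels = np.array([2, 5, 7, 11])   # sorted reference pixels
pix = np.array([5, 3, 11])         # search pixels; 3 is not in the reference set

index = np.searchsorted(pixels, pix)   # candidate positions: [1, 1, 3]
index[~np.in1d(pix, pixels)] = -1      # flag misses with the 'outside' value
print(index)                           # -> [ 1 -1  3]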
DarkEnergySurvey/ugali
ugali/utils/healpix.py
index_lonlat_in_pixels
def index_lonlat_in_pixels(lon,lat,pixels,nside,sort=False,outside=-1): """ Find the indices of a set of angles into a set of pixels Parameters: ----------- lon, lat : angular coordinates of the search points pixels : set of reference pixels nside : healpix nside used to convert the angles to pixels Returns: -------- index : index into the reference pixels """ pix = ang2pix(nside,lon,lat) return index_pix_in_pixels(pix,pixels,sort,outside)
python
def index_lonlat_in_pixels(lon,lat,pixels,nside,sort=False,outside=-1): """ Find the indices of a set of angles into a set of pixels Parameters: ----------- lon, lat : angular coordinates of the search points pixels : set of reference pixels nside : healpix nside used to convert the angles to pixels Returns: -------- index : index into the reference pixels """ pix = ang2pix(nside,lon,lat) return index_pix_in_pixels(pix,pixels,sort,outside)
[ "def", "index_lonlat_in_pixels", "(", "lon", ",", "lat", ",", "pixels", ",", "nside", ",", "sort", "=", "False", ",", "outside", "=", "-", "1", ")", ":", "pix", "=", "ang2pix", "(", "nside", ",", "lon", ",", "lat", ")", "return", "index_pix_in_pixels", "(", "pix", ",", "pixels", ",", "sort", ",", "outside", ")" ]
Find the indices of a set of angles into a set of pixels Parameters: ----------- lon, lat : angular coordinates of the search points pixels : set of reference pixels nside : healpix nside used to convert the angles to pixels Returns: -------- index : index into the reference pixels
[ "Find", "the", "indices", "of", "a", "set", "of", "angles", "into", "a", "set", "of", "pixels" ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/healpix.py#L246-L261
train
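The wrapper above only adds the angle-to-pixel conversion; a hedged sketch of the equivalent first step using healpy directly (ugali's ang2pix is assumed to behave like this, with lon/lat in degrees):

import numpy as np
import healpy as hp

nside = 64
lon = np.array([10.0, 45.0])   # degrees (assumed convention)
lat = np.array([-30.0, 20.0])
pix = hp.ang2pix(nside, lon, lat, lonlat=True)  # RING-ordered pixel per angle
# 'pix' would then be matched into a sorted reference set exactly as in
# index_pix_in_pixels above.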
DarkEnergySurvey/ugali
ugali/utils/healpix.py
header_odict
def header_odict(nside,nest=False,coord=None, partial=True): """Mimic the healpy header keywords.""" hdr = odict([]) hdr['PIXTYPE']=odict([('name','PIXTYPE'), ('value','HEALPIX'), ('comment','HEALPIX pixelisation')]) ordering = 'NEST' if nest else 'RING' hdr['ORDERING']=odict([('name','ORDERING'), ('value',ordering), ('comment','Pixel ordering scheme, either RING or NESTED')]) hdr['NSIDE']=odict([('name','NSIDE'), ('value',nside), ('comment','Resolution parameter of HEALPIX')]) if coord: hdr['COORDSYS']=odict([('name','COORDSYS'), ('value',coord), ('comment','Ecliptic, Galactic or Celestial (equatorial)')]) if not partial: hdr['FIRSTPIX']=odict([('name','FIRSTPIX'), ('value',0), ('comment','First pixel # (0 based)')]) hdr['LASTPIX']=odict([('name','LASTPIX'), ('value',hp.nside2npix(nside)-1), ('comment','Last pixel # (0 based)')]) hdr['INDXSCHM']=odict([('name','INDXSCHM'), ('value','EXPLICIT' if partial else 'IMPLICIT'), ('comment','Indexing: IMPLICIT or EXPLICIT')]) hdr['OBJECT']=odict([('name','OBJECT'), ('value','PARTIAL' if partial else 'FULLSKY'), ('comment','Sky coverage, either FULLSKY or PARTIAL')]) return hdr
python
def header_odict(nside,nest=False,coord=None, partial=True): """Mimic the healpy header keywords.""" hdr = odict([]) hdr['PIXTYPE']=odict([('name','PIXTYPE'), ('value','HEALPIX'), ('comment','HEALPIX pixelisation')]) ordering = 'NEST' if nest else 'RING' hdr['ORDERING']=odict([('name','ORDERING'), ('value',ordering), ('comment','Pixel ordering scheme, either RING or NESTED')]) hdr['NSIDE']=odict([('name','NSIDE'), ('value',nside), ('comment','Resolution parameter of HEALPIX')]) if coord: hdr['COORDSYS']=odict([('name','COORDSYS'), ('value',coord), ('comment','Ecliptic, Galactic or Celestial (equatorial)')]) if not partial: hdr['FIRSTPIX']=odict([('name','FIRSTPIX'), ('value',0), ('comment','First pixel # (0 based)')]) hdr['LASTPIX']=odict([('name','LASTPIX'), ('value',hp.nside2npix(nside)-1), ('comment','Last pixel # (0 based)')]) hdr['INDXSCHM']=odict([('name','INDXSCHM'), ('value','EXPLICIT' if partial else 'IMPLICIT'), ('comment','Indexing: IMPLICIT or EXPLICIT')]) hdr['OBJECT']=odict([('name','OBJECT'), ('value','PARTIAL' if partial else 'FULLSKY'), ('comment','Sky coverage, either FULLSKY or PARTIAL')]) return hdr
[ "def", "header_odict", "(", "nside", ",", "nest", "=", "False", ",", "coord", "=", "None", ",", "partial", "=", "True", ")", ":", "hdr", "=", "odict", "(", "[", "]", ")", "hdr", "[", "'PIXTYPE'", "]", "=", "odict", "(", "[", "(", "'name'", ",", "'PIXTYPE'", ")", ",", "(", "'value'", ",", "'HEALPIX'", ")", ",", "(", "'comment'", ",", "'HEALPIX pixelisation'", ")", "]", ")", "ordering", "=", "'NEST'", "if", "nest", "else", "'RING'", "hdr", "[", "'ORDERING'", "]", "=", "odict", "(", "[", "(", "'name'", ",", "'ORDERING'", ")", ",", "(", "'value'", ",", "ordering", ")", ",", "(", "'comment'", ",", "'Pixel ordering scheme, either RING or NESTED'", ")", "]", ")", "hdr", "[", "'NSIDE'", "]", "=", "odict", "(", "[", "(", "'name'", ",", "'NSIDE'", ")", ",", "(", "'value'", ",", "nside", ")", ",", "(", "'comment'", ",", "'Resolution parameter of HEALPIX'", ")", "]", ")", "if", "coord", ":", "hdr", "[", "'COORDSYS'", "]", "=", "odict", "(", "[", "(", "'name'", ",", "'COORDSYS'", ")", ",", "(", "'value'", ",", "coord", ")", ",", "(", "'comment'", ",", "'Ecliptic, Galactic or Celestial (equatorial)'", ")", "]", ")", "if", "not", "partial", ":", "hdr", "[", "'FIRSTPIX'", "]", "=", "odict", "(", "[", "(", "'name'", ",", "'FIRSTPIX'", ")", ",", "(", "'value'", ",", "0", ")", ",", "(", "'comment'", ",", "'First pixel # (0 based)'", ")", "]", ")", "hdr", "[", "'LASTPIX'", "]", "=", "odict", "(", "[", "(", "'name'", ",", "'LASTPIX'", ")", ",", "(", "'value'", ",", "hp", ".", "nside2npix", "(", "nside", ")", "-", "1", ")", ",", "(", "'comment'", ",", "'Last pixel # (0 based)'", ")", "]", ")", "hdr", "[", "'INDXSCHM'", "]", "=", "odict", "(", "[", "(", "'name'", ",", "'INDXSCHM'", ")", ",", "(", "'value'", ",", "'EXPLICIT'", "if", "partial", "else", "'IMPLICIT'", ")", ",", "(", "'comment'", ",", "'Indexing: IMPLICIT or EXPLICIT'", ")", "]", ")", "hdr", "[", "'OBJECT'", "]", "=", "odict", "(", "[", "(", "'name'", ",", "'OBJECT'", ")", ",", "(", "'value'", ",", "'PARTIAL'", "if", "partial", "else", "'FULLSKY'", ")", ",", "(", "'comment'", ",", "'Sky coverage, either FULLSKY or PARTIAL'", ")", "]", ")", "return", "hdr" ]
Mimic the healpy header keywords.
[ "Mimic", "the", "healpy", "header", "keywords", "." ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/healpix.py#L330-L362
train
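Each value in the returned odict is itself a record dict with 'name', 'value', and 'comment' keys, which is exactly the form fitsio accepts; a short sketch of how the header is consumed (mirroring write_partial_map below):

import fitsio

hdr = header_odict(nside=256, nest=False, coord='C')
fitshdr = fitsio.FITSHDR(list(hdr.values()))
# fitshdr now carries the PIXTYPE, ORDERING, NSIDE, COORDSYS,
# INDXSCHM, and OBJECT cards (FIRSTPIX/LASTPIX only when partial=False).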
DarkEnergySurvey/ugali
ugali/utils/healpix.py
write_partial_map
def write_partial_map(filename, data, nside, coord=None, nest=False, header=None,dtype=None,**kwargs): """ Partial HEALPix maps are used to efficiently store maps of the sky by only writing out the pixels that contain data. Three-dimensional data can be saved by supplying a distance modulus array which is stored in a separate extension. Parameters: ----------- filename : output file name data : dictionary or recarray of data to write (must contain 'PIXEL') nside : healpix nside of data coord : 'G'alactic, 'C'elestial, 'E'cliptic nest : if True, NESTED pixel ordering (RING otherwise) header : dict of additional header keywords kwargs : Passed to fitsio.write Returns: -------- None """ # ADW: Do we want to make everything uppercase? if isinstance(data,dict): names = list(data.keys()) else: names = data.dtype.names if 'PIXEL' not in names: msg = "'PIXEL' column not found." raise ValueError(msg) hdr = header_odict(nside=nside,coord=coord,nest=nest) fitshdr = fitsio.FITSHDR(list(hdr.values())) if header is not None: for k,v in header.items(): fitshdr.add_record({'name':k,'value':v}) logger.info("Writing %s"%filename) fitsio.write(filename,data,extname='PIX_DATA',header=fitshdr,clobber=True)
python
def write_partial_map(filename, data, nside, coord=None, nest=False, header=None,dtype=None,**kwargs): """ Partial HEALPix maps are used to efficiently store maps of the sky by only writing out the pixels that contain data. Three-dimensional data can be saved by supplying a distance modulus array which is stored in a separate extension. Parameters: ----------- filename : output file name data : dictionary or recarray of data to write (must contain 'PIXEL') nside : healpix nside of data coord : 'G'alactic, 'C'elestial, 'E'cliptic nest : if True, NESTED pixel ordering (RING otherwise) header : dict of additional header keywords kwargs : Passed to fitsio.write Returns: -------- None """ # ADW: Do we want to make everything uppercase? if isinstance(data,dict): names = list(data.keys()) else: names = data.dtype.names if 'PIXEL' not in names: msg = "'PIXEL' column not found." raise ValueError(msg) hdr = header_odict(nside=nside,coord=coord,nest=nest) fitshdr = fitsio.FITSHDR(list(hdr.values())) if header is not None: for k,v in header.items(): fitshdr.add_record({'name':k,'value':v}) logger.info("Writing %s"%filename) fitsio.write(filename,data,extname='PIX_DATA',header=fitshdr,clobber=True)
[ "def", "write_partial_map", "(", "filename", ",", "data", ",", "nside", ",", "coord", "=", "None", ",", "nest", "=", "False", ",", "header", "=", "None", ",", "dtype", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# ADW: Do we want to make everything uppercase?", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "names", "=", "list", "(", "data", ".", "keys", "(", ")", ")", "else", ":", "names", "=", "data", ".", "dtype", ".", "names", "if", "'PIXEL'", "not", "in", "names", ":", "msg", "=", "\"'PIXEL' column not found.\"", "raise", "ValueError", "(", "msg", ")", "hdr", "=", "header_odict", "(", "nside", "=", "nside", ",", "coord", "=", "coord", ",", "nest", "=", "nest", ")", "fitshdr", "=", "fitsio", ".", "FITSHDR", "(", "list", "(", "hdr", ".", "values", "(", ")", ")", ")", "if", "header", "is", "not", "None", ":", "for", "k", ",", "v", "in", "header", ".", "items", "(", ")", ":", "fitshdr", ".", "add_record", "(", "{", "'name'", ":", "k", ",", "'value'", ":", "v", "}", ")", "logger", ".", "info", "(", "\"Writing %s\"", "%", "filename", ")", "fitsio", ".", "write", "(", "filename", ",", "data", ",", "extname", "=", "'PIX_DATA'", ",", "header", "=", "fitshdr", ",", "clobber", "=", "True", ")" ]
Partial HEALPix maps are used to efficiently store maps of the sky by only writing out the pixels that contain data. Three-dimensional data can be saved by supplying a distance modulus array which is stored in a separate extension. Parameters: ----------- filename : output file name data : dictionary or recarray of data to write (must contain 'PIXEL') nside : healpix nside of data coord : 'G'alactic, 'C'elestial, 'E'cliptic nest : if True, NESTED pixel ordering (RING otherwise) header : dict of additional header keywords kwargs : Passed to fitsio.write Returns: -------- None
[ "Partial", "HEALPix", "maps", "are", "used", "to", "efficiently", "store", "maps", "of", "the", "sky", "by", "only", "writing", "out", "the", "pixels", "that", "contain", "data", "." ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/healpix.py#L364-L404
train
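A minimal usage sketch for the writer above; 'VALUE' is an illustrative column name (only 'PIXEL' is required), and the output filename is hypothetical:

import numpy as np

nside = 256
data = {'PIXEL': np.array([12, 99, 1024]),                # pixels that contain data
        'VALUE': np.array([0.1, 0.5, 0.9], dtype='f4')}   # hypothetical map column

write_partial_map('example_map.fits', data, nside, coord='C')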
DarkEnergySurvey/ugali
ugali/utils/healpix.py
merge_likelihood_headers
def merge_likelihood_headers(filenames, outfile): """ Merge header information from likelihood files. Parameters: ----------- filenames : input filenames outfile : the merged file to write Returns: -------- data : the data being written """ filenames = np.atleast_1d(filenames) ext='PIX_DATA' nside = fitsio.read_header(filenames[0],ext=ext)['LKDNSIDE'] keys=['STELLAR','NINSIDE','NANNULUS'] data_dict = odict(PIXEL=[]) for k in keys: data_dict[k] = [] for i,filename in enumerate(filenames): logger.debug('(%i/%i) %s'%(i+1, len(filenames), filename)) header = fitsio.read_header(filename,ext=ext) data_dict['PIXEL'].append(header['LKDPIX']) for key in keys: data_dict[key].append(header[key]) del header data_dict['PIXEL'] = np.array(data_dict['PIXEL'],dtype=int) for key in keys: data_dict[key] = np.array(data_dict[key],dtype='f4') #import pdb; pdb.set_trace() write_partial_map(outfile, data_dict, nside) return data_dict
python
def merge_likelihood_headers(filenames, outfile): """ Merge header information from likelihood files. Parameters: ----------- filenames : input filenames outfile : the merged file to write Returns: -------- data : the data being written """ filenames = np.atleast_1d(filenames) ext='PIX_DATA' nside = fitsio.read_header(filenames[0],ext=ext)['LKDNSIDE'] keys=['STELLAR','NINSIDE','NANNULUS'] data_dict = odict(PIXEL=[]) for k in keys: data_dict[k] = [] for i,filename in enumerate(filenames): logger.debug('(%i/%i) %s'%(i+1, len(filenames), filename)) header = fitsio.read_header(filename,ext=ext) data_dict['PIXEL'].append(header['LKDPIX']) for key in keys: data_dict[key].append(header[key]) del header data_dict['PIXEL'] = np.array(data_dict['PIXEL'],dtype=int) for key in keys: data_dict[key] = np.array(data_dict[key],dtype='f4') #import pdb; pdb.set_trace() write_partial_map(outfile, data_dict, nside) return data_dict
[ "def", "merge_likelihood_headers", "(", "filenames", ",", "outfile", ")", ":", "filenames", "=", "np", ".", "atleast_1d", "(", "filenames", ")", "ext", "=", "'PIX_DATA'", "nside", "=", "fitsio", ".", "read_header", "(", "filenames", "[", "0", "]", ",", "ext", "=", "ext", ")", "[", "'LKDNSIDE'", "]", "keys", "=", "[", "'STELLAR'", ",", "'NINSIDE'", ",", "'NANNULUS'", "]", "data_dict", "=", "odict", "(", "PIXEL", "=", "[", "]", ")", "for", "k", "in", "keys", ":", "data_dict", "[", "k", "]", "=", "[", "]", "for", "i", ",", "filename", "in", "enumerate", "(", "filenames", ")", ":", "logger", ".", "debug", "(", "'(%i/%i) %s'", "%", "(", "i", "+", "1", ",", "len", "(", "filenames", ")", ",", "filename", ")", ")", "header", "=", "fitsio", ".", "read_header", "(", "filename", ",", "ext", "=", "ext", ")", "data_dict", "[", "'PIXEL'", "]", ".", "append", "(", "header", "[", "'LKDPIX'", "]", ")", "for", "key", "in", "keys", ":", "data_dict", "[", "key", "]", ".", "append", "(", "header", "[", "key", "]", ")", "del", "header", "data_dict", "[", "'PIXEL'", "]", "=", "np", ".", "array", "(", "data_dict", "[", "'PIXEL'", "]", ",", "dtype", "=", "int", ")", "for", "key", "in", "keys", ":", "data_dict", "[", "key", "]", "=", "np", ".", "array", "(", "data_dict", "[", "key", "]", ",", "dtype", "=", "'f4'", ")", "#import pdb; pdb.set_trace()", "write_partial_map", "(", "outfile", ",", "data_dict", ",", "nside", ")", "return", "data_dict" ]
Merge header information from likelihood files. Parameters: ----------- filenames : input filenames outfile : the merged file to write Returns: -------- data : the data being written
[ "Merge", "header", "information", "from", "likelihood", "files", "." ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/healpix.py#L479-L518
train
consbio/ncdjango
ncdjango/api.py
TemporaryFileResource._convert_number
def _convert_number(self, number): """Converts a number to float or int as appropriate""" number = float(number) return int(number) if number.is_integer() else float(number)
python
def _convert_number(self, number): """Converts a number to float or int as appropriate""" number = float(number) return int(number) if number.is_integer() else float(number)
[ "def", "_convert_number", "(", "self", ",", "number", ")", ":", "number", "=", "float", "(", "number", ")", "return", "int", "(", "number", ")", "if", "number", ".", "is_integer", "(", ")", "else", "float", "(", "number", ")" ]
Converts a number to float or int as appropriate
[ "Converts", "a", "number", "to", "float", "or", "int", "as", "appropriate" ]
f807bfd1e4083ab29fbc3c4d4418be108383a710
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/api.py#L46-L50
train
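For illustration, the coercion above reduces to the following (the inputs are arbitrary examples):

number = float('3.0')
print(int(number) if number.is_integer() else number)   # -> 3
number = float('3.25')
print(int(number) if number.is_integer() else number)   # -> 3.25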
DarkEnergySurvey/ugali
ugali/pipeline/run_05.0_followup.py
do_results
def do_results(args): """ Write the results output file """ config,name,label,coord = args filenames = make_filenames(config,label) srcfile = filenames['srcfile'] samples = filenames['samfile'] if not exists(srcfile): logger.warning("Couldn't find %s; skipping..."%srcfile) return if not exists(samples): logger.warning("Couldn't find %s; skipping..."%samples) return logger.info("Writing %s..."%srcfile) from ugali.analysis.results import write_results write_results(srcfile,config,srcfile,samples)
python
def do_results(args): """ Write the results output file """ config,name,label,coord = args filenames = make_filenames(config,label) srcfile = filenames['srcfile'] samples = filenames['samfile'] if not exists(srcfile): logger.warning("Couldn't find %s; skipping..."%srcfile) return if not exists(samples): logger.warning("Couldn't find %s; skipping..."%samples) return logger.info("Writing %s..."%srcfile) from ugali.analysis.results import write_results write_results(srcfile,config,srcfile,samples)
[ "def", "do_results", "(", "args", ")", ":", "config", ",", "name", ",", "label", ",", "coord", "=", "args", "filenames", "=", "make_filenames", "(", "config", ",", "label", ")", "srcfile", "=", "filenames", "[", "'srcfile'", "]", "samples", "=", "filenames", "[", "'samfile'", "]", "if", "not", "exists", "(", "srcfile", ")", ":", "logger", ".", "warning", "(", "\"Couldn't find %s; skipping...\"", "%", "srcfile", ")", "return", "if", "not", "exists", "(", "samples", ")", ":", "logger", ".", "warning", "(", "\"Couldn't find %s; skipping...\"", "%", "samples", ")", "return", "logger", ".", "info", "(", "\"Writing %s...\"", "%", "srcfile", ")", "from", "ugali", ".", "analysis", ".", "results", "import", "write_results", "write_results", "(", "srcfile", ",", "config", ",", "srcfile", ",", "samples", ")" ]
Write the results output file
[ "Write", "the", "results", "output", "file" ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/pipeline/run_05.0_followup.py#L42-L59
train
DarkEnergySurvey/ugali
ugali/pipeline/run_05.0_followup.py
do_membership
def do_membership(args): """ Write the membership output file """ config,name,label,coord = args filenames = make_filenames(config,label) srcfile = filenames['srcfile'] memfile = filenames['memfile'] logger.info("Writing %s..."%memfile) from ugali.analysis.loglike import write_membership write_membership(memfile,config,srcfile,section='source')
python
def do_membership(args): """ Write the membership output file """ config,name,label,coord = args filenames = make_filenames(config,label) srcfile = filenames['srcfile'] memfile = filenames['memfile'] logger.info("Writing %s..."%memfile) from ugali.analysis.loglike import write_membership write_membership(memfile,config,srcfile,section='source')
[ "def", "do_membership", "(", "args", ")", ":", "config", ",", "name", ",", "label", ",", "coord", "=", "args", "filenames", "=", "make_filenames", "(", "config", ",", "label", ")", "srcfile", "=", "filenames", "[", "'srcfile'", "]", "memfile", "=", "filenames", "[", "'memfile'", "]", "logger", ".", "info", "(", "\"Writing %s...\"", "%", "memfile", ")", "from", "ugali", ".", "analysis", ".", "loglike", "import", "write_membership", "write_membership", "(", "memfile", ",", "config", ",", "srcfile", ",", "section", "=", "'source'", ")" ]
Write the membership output file
[ "Write", "the", "membership", "output", "file" ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/pipeline/run_05.0_followup.py#L61-L71
train
DarkEnergySurvey/ugali
ugali/pipeline/run_05.0_followup.py
do_plot
def do_plot(args): """ Create plots of mcmc output """ import ugali.utils.plotting import pylab as plt config,name,label,coord = args filenames = make_filenames(config,label) srcfile = filenames['srcfile'] samfile = filenames['samfile'] memfile = filenames['memfile'] if not exists(srcfile): logger.warning("Couldn't find %s; skipping..."%srcfile) return if not exists(samfile): logger.warning("Couldn't find %s; skipping..."%samfile) return config = ugali.utils.config.Config(config) burn = config['mcmc']['nburn']*config['mcmc']['nwalkers'] source = ugali.analysis.source.Source() source.load(srcfile,section='source') outfile = samfile.replace('.npy','.png') ugali.utils.plotting.plotTriangle(srcfile,samfile,burn=burn) logger.info(" Writing %s..."%outfile) plt.savefig(outfile,bbox_inches='tight',dpi=60) plt.close() plotter = ugali.utils.plotting.SourcePlotter(source,config,radius=0.5) data = fitsio.read(memfile,trim_strings=True) if exists(memfile) else None if data is not None: plt.figure() kernel,isochrone = source.kernel,source.isochrone ugali.utils.plotting.plotMembership(config,data,kernel,isochrone) outfile = samfile.replace('.npy','_mem.png') logger.info(" Writing %s..."%outfile) plt.savefig(outfile,bbox_inches='tight',dpi=60) plt.close() plotter.plot6(data) outfile = samfile.replace('.npy','_6panel.png') logger.info(" Writing %s..."%outfile) plt.savefig(outfile,bbox_inches='tight',dpi=60) outfile = samfile.replace('.npy','_6panel.pdf') logger.info(" Writing %s..."%outfile) plt.savefig(outfile,bbox_inches='tight',dpi=60) plt.close() try: title = name plotter.plot4() outfile = samfile.replace('.npy','_4panel.png') logger.info(" Writing %s..."%outfile) plt.suptitle(title) plt.savefig(outfile,bbox_inches='tight',dpi=60) plt.close() except Exception: logger.warning(" Failed to create plotter.plot4()")
python
def do_plot(args): """ Create plots of mcmc output """ import ugali.utils.plotting import pylab as plt config,name,label,coord = args filenames = make_filenames(config,label) srcfile = filenames['srcfile'] samfile = filenames['samfile'] memfile = filenames['memfile'] if not exists(srcfile): logger.warning("Couldn't find %s; skipping..."%srcfile) return if not exists(samfile): logger.warning("Couldn't find %s; skipping..."%samfile) return config = ugali.utils.config.Config(config) burn = config['mcmc']['nburn']*config['mcmc']['nwalkers'] source = ugali.analysis.source.Source() source.load(srcfile,section='source') outfile = samfile.replace('.npy','.png') ugali.utils.plotting.plotTriangle(srcfile,samfile,burn=burn) logger.info(" Writing %s..."%outfile) plt.savefig(outfile,bbox_inches='tight',dpi=60) plt.close() plotter = ugali.utils.plotting.SourcePlotter(source,config,radius=0.5) data = fitsio.read(memfile,trim_strings=True) if exists(memfile) else None if data is not None: plt.figure() kernel,isochrone = source.kernel,source.isochrone ugali.utils.plotting.plotMembership(config,data,kernel,isochrone) outfile = samfile.replace('.npy','_mem.png') logger.info(" Writing %s..."%outfile) plt.savefig(outfile,bbox_inches='tight',dpi=60) plt.close() plotter.plot6(data) outfile = samfile.replace('.npy','_6panel.png') logger.info(" Writing %s..."%outfile) plt.savefig(outfile,bbox_inches='tight',dpi=60) outfile = samfile.replace('.npy','_6panel.pdf') logger.info(" Writing %s..."%outfile) plt.savefig(outfile,bbox_inches='tight',dpi=60) plt.close() try: title = name plotter.plot4() outfile = samfile.replace('.npy','_4panel.png') logger.info(" Writing %s..."%outfile) plt.suptitle(title) plt.savefig(outfile,bbox_inches='tight',dpi=60) plt.close() except Exception: logger.warning(" Failed to create plotter.plot4()")
[ "def", "do_plot", "(", "args", ")", ":", "import", "ugali", ".", "utils", ".", "plotting", "import", "pylab", "as", "plt", "config", ",", "name", ",", "label", ",", "coord", "=", "args", "filenames", "=", "make_filenames", "(", "config", ",", "label", ")", "srcfile", "=", "filenames", "[", "'srcfile'", "]", "samfile", "=", "filenames", "[", "'samfile'", "]", "memfile", "=", "filenames", "[", "'memfile'", "]", "if", "not", "exists", "(", "srcfile", ")", ":", "logger", ".", "warning", "(", "\"Couldn't find %s; skipping...\"", "%", "srcfile", ")", "return", "if", "not", "exists", "(", "samfile", ")", ":", "logger", ".", "warning", "(", "\"Couldn't find %s; skipping...\"", "%", "samfile", ")", "return", "config", "=", "ugali", ".", "utils", ".", "config", ".", "Config", "(", "config", ")", "burn", "=", "config", "[", "'mcmc'", "]", "[", "'nburn'", "]", "*", "config", "[", "'mcmc'", "]", "[", "'nwalkers'", "]", "source", "=", "ugali", ".", "analysis", ".", "source", ".", "Source", "(", ")", "source", ".", "load", "(", "srcfile", ",", "section", "=", "'source'", ")", "outfile", "=", "samfile", ".", "replace", "(", "'.npy'", ",", "'.png'", ")", "ugali", ".", "utils", ".", "plotting", ".", "plotTriangle", "(", "srcfile", ",", "samfile", ",", "burn", "=", "burn", ")", "logger", ".", "info", "(", "\" Writing %s...\"", "%", "outfile", ")", "plt", ".", "savefig", "(", "outfile", ",", "bbox_inches", "=", "'tight'", ",", "dpi", "=", "60", ")", "plt", ".", "close", "(", ")", "plotter", "=", "ugali", ".", "utils", ".", "plotting", ".", "SourcePlotter", "(", "source", ",", "config", ",", "radius", "=", "0.5", ")", "data", "=", "fitsio", ".", "read", "(", "memfile", ",", "trim_strings", "=", "True", ")", "if", "exists", "(", "memfile", ")", "else", "None", "if", "data", "is", "not", "None", ":", "plt", ".", "figure", "(", ")", "kernel", ",", "isochrone", "=", "source", ".", "kernel", ",", "source", ".", "isochrone", "ugali", ".", "utils", ".", "plotting", ".", "plotMembership", "(", "config", ",", "data", ",", "kernel", ",", "isochrone", ")", "outfile", "=", "samfile", ".", "replace", "(", "'.npy'", ",", "'_mem.png'", ")", "logger", ".", "info", "(", "\" Writing %s...\"", "%", "outfile", ")", "plt", ".", "savefig", "(", "outfile", ",", "bbox_inches", "=", "'tight'", ",", "dpi", "=", "60", ")", "plt", ".", "close", "(", ")", "plotter", ".", "plot6", "(", "data", ")", "outfile", "=", "samfile", ".", "replace", "(", "'.npy'", ",", "'_6panel.png'", ")", "logger", ".", "info", "(", "\" Writing %s...\"", "%", "outfile", ")", "plt", ".", "savefig", "(", "outfile", ",", "bbox_inches", "=", "'tight'", ",", "dpi", "=", "60", ")", "outfile", "=", "samfile", ".", "replace", "(", "'.npy'", ",", "'_6panel.pdf'", ")", "logger", ".", "info", "(", "\" Writing %s...\"", "%", "outfile", ")", "plt", ".", "savefig", "(", "outfile", ",", "bbox_inches", "=", "'tight'", ",", "dpi", "=", "60", ")", "plt", ".", "close", "(", ")", "try", ":", "title", "=", "name", "plotter", ".", "plot4", "(", ")", "outfile", "=", "samfile", ".", "replace", "(", "'.npy'", ",", "'_4panel.png'", ")", "logger", ".", "info", "(", "\" Writing %s...\"", "%", "outfile", ")", "plt", ".", "suptitle", "(", "title", ")", "plt", ".", "savefig", "(", "outfile", ",", "bbox_inches", "=", "'tight'", ",", "dpi", "=", "60", ")", "plt", ".", "close", "(", ")", "except", ":", "logger", ".", "warning", "(", "\" Failed to create plotter.plot4()\"", ")" ]
Create plots of mcmc output
[ "Create", "plots", "of", "mcmc", "output" ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/pipeline/run_05.0_followup.py#L73-L136
train
warrenspe/hconf
hconf/subparsers/dictionary.py
Dictionary.parse
def parse(self, *args): """ Return our initialized dictionary arguments. """ if isinstance(self.dictionary, dict): return self.dictionary raise self.subparserException("Argument passed to Dictionary SubParser is not a dict: %s" % type(self.dictionary))
python
def parse(self, *args): """ Return our initialized dictionary arguments. """ if isinstance(self.dictionary, dict): return self.dictionary raise self.subparserException("Argument passed to Dictionary SubParser is not a dict: %s" % type(self.dictionary))
[ "def", "parse", "(", "self", ",", "*", "args", ")", ":", "if", "isinstance", "(", "self", ".", "dictionary", ",", "dict", ")", ":", "return", "self", ".", "dictionary", "raise", "self", ".", "subparserException", "(", "\"Argument passed to Dictionary SubParser is not a dict: %s\"", "%", "type", "(", "self", ".", "dictionary", ")", ")" ]
Return our initialized dictionary arguments.
[ "Return", "our", "initialized", "dictionary", "arguments", "." ]
12074d15dc3641d3903488c95d89a507386a32d5
https://github.com/warrenspe/hconf/blob/12074d15dc3641d3903488c95d89a507386a32d5/hconf/subparsers/dictionary.py#L39-L47
train
BertrandBordage/django-terms
terms/managers.py
TermManager._caches_dicts
def _caches_dicts(self): """ Caches variants_dict and replace_dict in a single database hit. """ qs = (self.get_query_set() if django.VERSION < (1, 6) else self.get_queryset()) variants_dict = self._get_variants_dict(qs) cache.set(VARIANTS_DICT_CACHE_KEY, variants_dict) replace_dict = self._get_replace_dict(qs) cache.set(REPLACE_DICT_CACHE_KEY, replace_dict) return variants_dict, replace_dict
python
def _caches_dicts(self): """ Caches variants_dict and replace_dict in a single database hit. """ qs = (self.get_query_set() if django.VERSION < (1, 6) else self.get_queryset()) variants_dict = self._get_variants_dict(qs) cache.set(VARIANTS_DICT_CACHE_KEY, variants_dict) replace_dict = self._get_replace_dict(qs) cache.set(REPLACE_DICT_CACHE_KEY, replace_dict) return variants_dict, replace_dict
[ "def", "_caches_dicts", "(", "self", ")", ":", "qs", "=", "(", "self", ".", "get_query_set", "(", ")", "if", "django", ".", "VERSION", "<", "(", "1", ",", "6", ")", "else", "self", ".", "get_queryset", "(", ")", ")", "variants_dict", "=", "self", ".", "_get_variants_dict", "(", "qs", ")", "cache", ".", "set", "(", "VARIANTS_DICT_CACHE_KEY", ",", "variants_dict", ")", "replace_dict", "=", "self", ".", "_get_replace_dict", "(", "qs", ")", "cache", ".", "set", "(", "REPLACE_DICT_CACHE_KEY", ",", "replace_dict", ")", "return", "variants_dict", ",", "replace_dict" ]
Caches variants_dict and replace_dict in a single database hit.
[ "Caches", "variants_dict", "and", "replace_dict", "in", "a", "single", "database", "hit", "." ]
2555c2cf5abf14adef9a8e2dd22c4a9076396a10
https://github.com/BertrandBordage/django-terms/blob/2555c2cf5abf14adef9a8e2dd22c4a9076396a10/terms/managers.py#L41-L55
train
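A hedged sketch of the read side this caching supports (django.core.cache is the real API in use; the manager access path is hypothetical):

from django.core.cache import cache

variants_dict = cache.get(VARIANTS_DICT_CACHE_KEY)
if variants_dict is None:
    # Cache miss: recompute and re-cache both dicts with one database hit.
    variants_dict, replace_dict = Term.objects._caches_dicts()  # hypothetical call site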
stevearc/dynamo3
dynamo3/exception.py
translate_exception
def translate_exception(exc, kwargs): """ Translate a botocore.exceptions.ClientError into a dynamo3 error """ error = exc.response['Error'] error.setdefault('Message', '') err_class = EXC.get(error['Code'], DynamoDBError) return err_class(exc.response['ResponseMetadata']['HTTPStatusCode'], exc_info=sys.exc_info(), args=pformat(kwargs), **error)
python
def translate_exception(exc, kwargs): """ Translate a botocore.exceptions.ClientError into a dynamo3 error """ error = exc.response['Error'] error.setdefault('Message', '') err_class = EXC.get(error['Code'], DynamoDBError) return err_class(exc.response['ResponseMetadata']['HTTPStatusCode'], exc_info=sys.exc_info(), args=pformat(kwargs), **error)
[ "def", "translate_exception", "(", "exc", ",", "kwargs", ")", ":", "error", "=", "exc", ".", "response", "[", "'Error'", "]", "error", ".", "setdefault", "(", "'Message'", ",", "''", ")", "err_class", "=", "EXC", ".", "get", "(", "error", "[", "'Code'", "]", ",", "DynamoDBError", ")", "return", "err_class", "(", "exc", ".", "response", "[", "'ResponseMetadata'", "]", "[", "'HTTPStatusCode'", "]", ",", "exc_info", "=", "sys", ".", "exc_info", "(", ")", ",", "args", "=", "pformat", "(", "kwargs", ")", ",", "*", "*", "error", ")" ]
Translate a botocore.exceptions.ClientError into a dynamo3 error
[ "Translate", "a", "botocore", ".", "exceptions", ".", "ClientError", "into", "a", "dynamo3", "error" ]
f897c40ece28586272dbcab8f0d99a14a1831dda
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/exception.py#L50-L56
train
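A sketch of the call site this helper assumes (ClientError and describe_table are real boto3/botocore names; the table name and kwargs are illustrative):

import boto3
from botocore.exceptions import ClientError

client = boto3.client('dynamodb')
kwargs = {'TableName': 'my-table'}   # illustrative request kwargs
try:
    client.describe_table(**kwargs)
except ClientError as e:
    raise translate_exception(e, kwargs)  # surfaces as a dynamo3 error subclass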
stevearc/dynamo3
dynamo3/exception.py
DynamoDBError.re_raise
def re_raise(self): """ Raise this exception with the original traceback """ if self.exc_info is not None: six.reraise(type(self), self, self.exc_info[2]) else: raise self
python
def re_raise(self): """ Raise this exception with the original traceback """ if self.exc_info is not None: six.reraise(type(self), self, self.exc_info[2]) else: raise self
[ "def", "re_raise", "(", "self", ")", ":", "if", "self", ".", "exc_info", "is", "not", "None", ":", "six", ".", "reraise", "(", "type", "(", "self", ")", ",", "self", ",", "self", ".", "exc_info", "[", "2", "]", ")", "else", ":", "raise", "self" ]
Raise this exception with the original traceback
[ "Raise", "this", "exception", "with", "the", "original", "traceback" ]
f897c40ece28586272dbcab8f0d99a14a1831dda
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/exception.py#L19-L24
train
totalgood/pugnlp
src/pugnlp/segmentation.py
generate_lines
def generate_lines(text, ext=['.txt', '.md', '.rst', '.asciidoc', '.asc']): r""" Yield text one line at a time from a single file path, files in a directory, or a text string >>> list(generate_lines('Hello crazy\r\nMS/Apple world\rof EOLS.\n')) ['Hello crazy\r\n', 'MS/Apple world\r', 'of EOLS.\n'] """ if isinstance(text, basestring): if len(text) <= 256: if os.path.isfile(text) and os.path.splitext(text)[-1].lower() in ext: return open(text) elif os.path.isdir(text): return chain.from_iterable(generate_lines(stat['path']) for stat in find_files(text, ext=ext)) else: return (line for line in Split(text=text)) else: return Split(text=text) return chain.from_iterable(generate_lines(obj) for obj in text)
python
def generate_lines(text, ext=['.txt', '.md', '.rst', '.asciidoc', '.asc']): r""" Yield text one line at a time from a single file path, files in a directory, or a text string >>> list(generate_lines('Hello crazy\r\nMS/Apple world\rof EOLS.\n')) ['Hello crazy\r\n', 'MS/Apple world\r', 'of EOLS.\n'] """ if isinstance(text, basestring): if len(text) <= 256: if os.path.isfile(text) and os.path.splitext(text)[-1].lower() in ext: return open(text) elif os.path.isdir(text): return chain.from_iterable(generate_lines(stat['path']) for stat in find_files(text, ext=ext)) else: return (line for line in Split(text=text)) else: return Split(text=text) return chain.from_iterable(generate_lines(obj) for obj in text)
[ "def", "generate_lines", "(", "text", ",", "ext", "=", "[", "'.txt'", ",", "'.md'", ",", "'.rst'", ",", "'.asciidoc'", ",", "'.asc'", "]", ")", ":", "if", "isinstance", "(", "text", ",", "basestring", ")", ":", "if", "len", "(", "text", ")", "<=", "256", ":", "if", "os", ".", "path", ".", "isfile", "(", "text", ")", "and", "os", ".", "path", ".", "splitext", "(", "text", ")", "[", "-", "1", "]", ".", "lower", "(", ")", "in", "ext", ":", "return", "open", "(", "text", ")", "elif", "os", ".", "path", ".", "isdir", "(", "text", ")", ":", "return", "chain", ".", "from_iterable", "(", "generate_lines", "(", "stat", "[", "'path'", "]", ")", "for", "stat", "in", "find_files", "(", "text", ",", "ext", "=", "ext", ")", ")", "else", ":", "return", "(", "line", "for", "line", "in", "Split", "(", "text", "=", "text", ")", ")", "else", ":", "return", "Split", "(", "text", "=", "text", ")", "return", "chain", ".", "from_iterable", "(", "generate_lines", "(", "obj", ")", "for", "obj", "in", "text", ")" ]
r""" Yield text one line at a time from from a single file path, files in a directory, or a text string >>> list(generate_lines('Hello crazy\r\nMS/Apple world\rof EOLS.\n')) ['Hello crazy\r\n', 'MS/Apple world\r', 'of EOLS.\n']
[ "r", "Yield", "text", "one", "line", "at", "a", "time", "from", "from", "a", "single", "file", "path", "files", "in", "a", "directory", "or", "a", "text", "string" ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/segmentation.py#L49-L66
train
stevearc/dynamo3
dynamo3/rate.py
DecayingCapacityStore.add
def add(self, now, num): """ Add a timestamp and value to the data """ if num == 0: return self.points.append((now, num))
python
def add(self, now, num): """ Add a timestamp and value to the data """ if num == 0: return self.points.append((now, num))
[ "def", "add", "(", "self", ",", "now", ",", "num", ")", ":", "if", "num", "==", "0", ":", "return", "self", ".", "points", ".", "append", "(", "(", "now", ",", "num", ")", ")" ]
Add a timestamp and value to the data
[ "Add", "a", "timestamp", "and", "date", "to", "the", "data" ]
f897c40ece28586272dbcab8f0d99a14a1831dda
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/rate.py#L27-L31
train
stevearc/dynamo3
dynamo3/rate.py
DecayingCapacityStore.value
def value(self): """ Get the summation of all non-expired points """ now = time.time() cutoff = now - self.window while self.points and self.points[0][0] < cutoff: self.points.pop(0) return sum([p[1] for p in self.points])
python
def value(self): """ Get the summation of all non-expired points """ now = time.time() cutoff = now - self.window while self.points and self.points[0][0] < cutoff: self.points.pop(0) return sum([p[1] for p in self.points])
[ "def", "value", "(", "self", ")", ":", "now", "=", "time", ".", "time", "(", ")", "cutoff", "=", "now", "-", "self", ".", "window", "while", "self", ".", "points", "and", "self", ".", "points", "[", "0", "]", "[", "0", "]", "<", "cutoff", ":", "self", ".", "points", ".", "pop", "(", "0", ")", "return", "sum", "(", "[", "p", "[", "1", "]", "for", "p", "in", "self", ".", "points", "]", ")" ]
Get the summation of all non-expired points
[ "Get", "the", "summation", "of", "all", "non", "-", "expired", "points" ]
f897c40ece28586272dbcab8f0d99a14a1831dda
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/rate.py#L34-L40
train
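A quick sketch of the sliding-window behavior of the store (it assumes the constructor takes no required arguments and sets a short default window, e.g. one second):

import time

store = DecayingCapacityStore()
store.add(time.time(), 5)
store.add(time.time(), 3)
print(store.value)   # -> 8 while both points fall inside the window
time.sleep(2)        # wait past the assumed window length
print(store.value)   # -> 0 once both points have expired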
stevearc/dynamo3
dynamo3/rate.py
RateLimit.get_consumed
def get_consumed(self, key): """ Getter for a consumed capacity storage dict """ if key not in self._consumed: self._consumed[key] = { 'read': DecayingCapacityStore(), 'write': DecayingCapacityStore(), } return self._consumed[key]
python
def get_consumed(self, key): """ Getter for a consumed capacity storage dict """ if key not in self._consumed: self._consumed[key] = { 'read': DecayingCapacityStore(), 'write': DecayingCapacityStore(), } return self._consumed[key]
[ "def", "get_consumed", "(", "self", ",", "key", ")", ":", "if", "key", "not", "in", "self", ".", "_consumed", ":", "self", ".", "_consumed", "[", "key", "]", "=", "{", "'read'", ":", "DecayingCapacityStore", "(", ")", ",", "'write'", ":", "DecayingCapacityStore", "(", ")", ",", "}", "return", "self", ".", "_consumed", "[", "key", "]" ]
Getter for a consumed capacity storage dict
[ "Getter", "for", "a", "consumed", "capacity", "storage", "dict" ]
f897c40ece28586272dbcab8f0d99a14a1831dda
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/rate.py#L96-L103
train
stevearc/dynamo3
dynamo3/rate.py
RateLimit.on_capacity
def on_capacity(self, connection, command, query_kwargs, response, capacity): """ Hook that runs in response to a 'returned capacity' event """ now = time.time() args = (connection, command, query_kwargs, response, capacity) # Check total against the total_cap self._wait(args, now, self.total_cap, self._total_consumed, capacity.total) # Increment table consumed capacity & check it if capacity.tablename in self.table_caps: table_cap = self.table_caps[capacity.tablename] else: table_cap = self.default_cap consumed_history = self.get_consumed(capacity.tablename) if capacity.table_capacity is not None: self._wait(args, now, table_cap, consumed_history, capacity.table_capacity) # The local index consumed capacity also counts against the table if capacity.local_index_capacity is not None: for consumed in six.itervalues(capacity.local_index_capacity): self._wait(args, now, table_cap, consumed_history, consumed) # Increment global indexes # check global indexes against the table+index cap or default gic = capacity.global_index_capacity if gic is not None: for index_name, consumed in six.iteritems(gic): full_name = capacity.tablename + ':' + index_name if index_name in table_cap: index_cap = table_cap[index_name] elif full_name in self.table_caps: index_cap = self.table_caps[full_name] else: # If there's no specified capacity for the index, # use the cap on the table index_cap = table_cap consumed_history = self.get_consumed(full_name) self._wait(args, now, index_cap, consumed_history, consumed)
python
def on_capacity(self, connection, command, query_kwargs, response, capacity): """ Hook that runs in response to a 'returned capacity' event """ now = time.time() args = (connection, command, query_kwargs, response, capacity) # Check total against the total_cap self._wait(args, now, self.total_cap, self._total_consumed, capacity.total) # Increment table consumed capacity & check it if capacity.tablename in self.table_caps: table_cap = self.table_caps[capacity.tablename] else: table_cap = self.default_cap consumed_history = self.get_consumed(capacity.tablename) if capacity.table_capacity is not None: self._wait(args, now, table_cap, consumed_history, capacity.table_capacity) # The local index consumed capacity also counts against the table if capacity.local_index_capacity is not None: for consumed in six.itervalues(capacity.local_index_capacity): self._wait(args, now, table_cap, consumed_history, consumed) # Increment global indexes # check global indexes against the table+index cap or default gic = capacity.global_index_capacity if gic is not None: for index_name, consumed in six.iteritems(gic): full_name = capacity.tablename + ':' + index_name if index_name in table_cap: index_cap = table_cap[index_name] elif full_name in self.table_caps: index_cap = self.table_caps[full_name] else: # If there's no specified capacity for the index, # use the cap on the table index_cap = table_cap consumed_history = self.get_consumed(full_name) self._wait(args, now, index_cap, consumed_history, consumed)
[ "def", "on_capacity", "(", "self", ",", "connection", ",", "command", ",", "query_kwargs", ",", "response", ",", "capacity", ")", ":", "now", "=", "time", ".", "time", "(", ")", "args", "=", "(", "connection", ",", "command", ",", "query_kwargs", ",", "response", ",", "capacity", ")", "# Check total against the total_cap", "self", ".", "_wait", "(", "args", ",", "now", ",", "self", ".", "total_cap", ",", "self", ".", "_total_consumed", ",", "capacity", ".", "total", ")", "# Increment table consumed capacity & check it", "if", "capacity", ".", "tablename", "in", "self", ".", "table_caps", ":", "table_cap", "=", "self", ".", "table_caps", "[", "capacity", ".", "tablename", "]", "else", ":", "table_cap", "=", "self", ".", "default_cap", "consumed_history", "=", "self", ".", "get_consumed", "(", "capacity", ".", "tablename", ")", "if", "capacity", ".", "table_capacity", "is", "not", "None", ":", "self", ".", "_wait", "(", "args", ",", "now", ",", "table_cap", ",", "consumed_history", ",", "capacity", ".", "table_capacity", ")", "# The local index consumed capacity also counts against the table", "if", "capacity", ".", "local_index_capacity", "is", "not", "None", ":", "for", "consumed", "in", "six", ".", "itervalues", "(", "capacity", ".", "local_index_capacity", ")", ":", "self", ".", "_wait", "(", "args", ",", "now", ",", "table_cap", ",", "consumed_history", ",", "consumed", ")", "# Increment global indexes", "# check global indexes against the table+index cap or default", "gic", "=", "capacity", ".", "global_index_capacity", "if", "gic", "is", "not", "None", ":", "for", "index_name", ",", "consumed", "in", "six", ".", "iteritems", "(", "gic", ")", ":", "full_name", "=", "capacity", ".", "tablename", "+", "':'", "+", "index_name", "if", "index_name", "in", "table_cap", ":", "index_cap", "=", "table_cap", "[", "index_name", "]", "elif", "full_name", "in", "self", ".", "table_caps", ":", "index_cap", "=", "self", ".", "table_caps", "[", "full_name", "]", "else", ":", "# If there's no specified capacity for the index,", "# use the cap on the table", "index_cap", "=", "table_cap", "consumed_history", "=", "self", ".", "get_consumed", "(", "full_name", ")", "self", ".", "_wait", "(", "args", ",", "now", ",", "index_cap", ",", "consumed_history", ",", "consumed", ")" ]
Hook that runs in response to a 'returned capacity' event
[ "Hook", "that", "runs", "in", "response", "to", "a", "returned", "capacity", "event" ]
f897c40ece28586272dbcab8f0d99a14a1831dda
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/rate.py#L105-L143
train
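The lookup order above implies three ways to cap capacity; a hedged sketch of the corresponding table_caps configuration (the table and index names are hypothetical, but the key forms come directly from the lookups in on_capacity):

table_caps = {
    'mytable': {'read': 50, 'write': 20,
                'my-index': {'read': 30}},    # per-index cap nested in the table cap
    'othertable:other-index': {'read': 10},   # 'table:index' key form
}
# Any index without its own entry falls back to its table's cap,
# and any table without an entry falls back to default_cap.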
stevearc/dynamo3
dynamo3/rate.py
RateLimit._wait
def _wait(self, args, now, cap, consumed_history, consumed_capacity): """ Check the consumed capacity against the limit and sleep """ for key in ['read', 'write']: if key in cap and cap[key] > 0: consumed_history[key].add(now, consumed_capacity[key]) consumed = consumed_history[key].value if consumed > 0 and consumed >= cap[key]: seconds = math.ceil(float(consumed) / cap[key]) LOG.debug("Rate limited throughput exceeded. Sleeping " "for %d seconds.", seconds) if callable(self.callback): callback_args = args + (seconds,) if self.callback(*callback_args): continue time.sleep(seconds)
python
def _wait(self, args, now, cap, consumed_history, consumed_capacity): """ Check the consumed capacity against the limit and sleep """ for key in ['read', 'write']: if key in cap and cap[key] > 0: consumed_history[key].add(now, consumed_capacity[key]) consumed = consumed_history[key].value if consumed > 0 and consumed >= cap[key]: seconds = math.ceil(float(consumed) / cap[key]) LOG.debug("Rate limited throughput exceeded. Sleeping " "for %d seconds.", seconds) if callable(self.callback): callback_args = args + (seconds,) if self.callback(*callback_args): continue time.sleep(seconds)
[ "def", "_wait", "(", "self", ",", "args", ",", "now", ",", "cap", ",", "consumed_history", ",", "consumed_capacity", ")", ":", "for", "key", "in", "[", "'read'", ",", "'write'", "]", ":", "if", "key", "in", "cap", "and", "cap", "[", "key", "]", ">", "0", ":", "consumed_history", "[", "key", "]", ".", "add", "(", "now", ",", "consumed_capacity", "[", "key", "]", ")", "consumed", "=", "consumed_history", "[", "key", "]", ".", "value", "if", "consumed", ">", "0", "and", "consumed", ">=", "cap", "[", "key", "]", ":", "seconds", "=", "math", ".", "ceil", "(", "float", "(", "consumed", ")", "/", "cap", "[", "key", "]", ")", "LOG", ".", "debug", "(", "\"Rate limited throughput exceeded. Sleeping \"", "\"for %d seconds.\"", ",", "seconds", ")", "if", "callable", "(", "self", ".", "callback", ")", ":", "callback_args", "=", "args", "+", "(", "seconds", ",", ")", "if", "self", ".", "callback", "(", "*", "callback_args", ")", ":", "continue", "time", ".", "sleep", "(", "seconds", ")" ]
Check the consumed capacity against the limit and sleep
[ "Check", "the", "consumed", "capacity", "against", "the", "limit", "and", "sleep" ]
f897c40ece28586272dbcab8f0d99a14a1831dda
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/rate.py#L145-L159
train
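As a concrete reading of the sleep computation above: with a cap of 10 read units and 25 units consumed inside the window, the throttle sleeps for ceil(25 / 10) = 3 seconds, unless the callback returns True:

import math

consumed, cap = 25.0, 10
seconds = math.ceil(consumed / cap)
print(seconds)   # -> 3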
juju/theblues
theblues/support.py
Support.create_case
def create_case(self, name, email, subject, description, businessImpact, priority, phone): """ Send a case creation request to Salesforce to create a ticket. @param name of the person creating the case. @param email of the person creating the case. @param subject of the case. @param description of the case. @param businessImpact of the case. @param priority of the case. @param phone of the person creating the case. @return Nothing if this is ok. @raise ServerError when something goes wrong. @raise ValueError when data passed in are invalid """ if not('@' in parseaddr(email)[1]): raise ValueError('invalid email: {}'.format(email)) if '' == name or name is None: raise ValueError('empty name') if '' == subject or subject is None: raise ValueError('empty subject') if '' == description or description is None: raise ValueError('empty description') if '' == businessImpact or businessImpact is None: raise ValueError('empty business impact') if priority is None: raise ValueError('Ensure the priority is from the set of ' 'known priorities') if '' == phone or phone is None: raise ValueError('empty phone') try: r = requests.post(self.url, data={ 'orgid': self.orgId, 'recordType': self.recordType, 'name': name, 'email': email, 'subject': subject, 'description': description, self.BUSINESS_IMPACT: businessImpact, 'priority': priority, 'phone': phone, 'external': 1 }, timeout=self.timeout) r.raise_for_status() except Timeout: message = 'Request timed out: {url} timeout: {timeout}' message = message.format(url=self.url, timeout=self.timeout) log.error(message) raise ServerError(message) except RequestException as err: log.info('cannot create case: {}'.format(err)) raise ServerError( 'cannot create case: {}'.format(err))
python
def create_case(self, name, email, subject, description, businessImpact, priority, phone): """ Send a case creation request to Salesforce to create a ticket. @param name of the person creating the case. @param email of the person creating the case. @param subject of the case. @param description of the case. @param businessImpact of the case. @param priority of the case. @param phone of the person creating the case. @return Nothing if this is ok. @raise ServerError when something goes wrong. @raise ValueError when data passed in are invalid """ if not('@' in parseaddr(email)[1]): raise ValueError('invalid email: {}'.format(email)) if '' == name or name is None: raise ValueError('empty name') if '' == subject or subject is None: raise ValueError('empty subject') if '' == description or description is None: raise ValueError('empty description') if '' == businessImpact or businessImpact is None: raise ValueError('empty business impact') if priority is None: raise ValueError('Ensure the priority is from the set of ' 'known priorities') if '' == phone or phone is None: raise ValueError('empty phone') try: r = requests.post(self.url, data={ 'orgid': self.orgId, 'recordType': self.recordType, 'name': name, 'email': email, 'subject': subject, 'description': description, self.BUSINESS_IMPACT: businessImpact, 'priority': priority, 'phone': phone, 'external': 1 }, timeout=self.timeout) r.raise_for_status() except Timeout: message = 'Request timed out: {url} timeout: {timeout}' message = message.format(url=self.url, timeout=self.timeout) log.error(message) raise ServerError(message) except RequestException as err: log.info('cannot create case: {}'.format(err)) raise ServerError( 'cannot create case: {}'.format(err))
[ "def", "create_case", "(", "self", ",", "name", ",", "email", ",", "subject", ",", "description", ",", "businessImpact", ",", "priority", ",", "phone", ")", ":", "if", "not", "(", "'@'", "in", "parseaddr", "(", "email", ")", "[", "1", "]", ")", ":", "raise", "ValueError", "(", "'invalid email: {}'", ".", "format", "(", "email", ")", ")", "if", "''", "==", "name", "or", "name", "is", "None", ":", "raise", "ValueError", "(", "'empty name'", ")", "if", "''", "==", "subject", "or", "subject", "is", "None", ":", "raise", "ValueError", "(", "'empty subject'", ")", "if", "''", "==", "description", "or", "description", "is", "None", ":", "raise", "ValueError", "(", "'empty description'", ")", "if", "''", "==", "businessImpact", "or", "businessImpact", "is", "None", ":", "raise", "ValueError", "(", "'empty business impact'", ")", "if", "priority", "is", "None", ":", "raise", "ValueError", "(", "'Ensure the priority is from the set of '", "'known priorities'", ")", "if", "''", "==", "phone", "or", "phone", "is", "None", ":", "raise", "ValueError", "(", "'empty phone'", ")", "try", ":", "r", "=", "requests", ".", "post", "(", "self", ".", "url", ",", "data", "=", "{", "'orgid'", ":", "self", ".", "orgId", ",", "'recordType'", ":", "self", ".", "recordType", ",", "'name'", ":", "name", ",", "'email'", ":", "email", ",", "'subject'", ":", "subject", ",", "'description'", ":", "description", ",", "self", ".", "BUSINESS_IMPACT", ":", "businessImpact", ",", "'priority'", ":", "priority", ",", "'phone'", ":", "phone", ",", "'external'", ":", "1", "}", ",", "timeout", "=", "self", ".", "timeout", ")", "r", ".", "raise_for_status", "(", ")", "except", "Timeout", ":", "message", "=", "'Request timed out: {url} timeout: {timeout}'", "message", "=", "message", ".", "format", "(", "url", "=", "self", ".", "url", ",", "timeout", "=", "self", ".", "timeout", ")", "log", ".", "error", "(", "message", ")", "raise", "ServerError", "(", "message", ")", "except", "RequestException", "as", "err", ":", "log", ".", "info", "(", "'cannot create case: {}'", ".", "format", "(", "err", ")", ")", "raise", "ServerError", "(", "'cannot create case: {}'", ".", "format", "(", "err", ")", ")" ]
Send a case creation request to Salesforce to create a ticket. @param name of the person creating the case. @param email of the person creating the case. @param subject of the case. @param description of the case. @param businessImpact of the case. @param priority of the case. @param phone of the person creating the case. @return Nothing if this is ok. @raise ServerError when something goes wrong. @raise ValueError when data passed in are invalid
[ "Send", "a", "case", "creation", "to", "SalesForces", "to", "create", "a", "ticket", "." ]
f4431f29e43d04fc32f38f4f86cea45cd4e6ae98
https://github.com/juju/theblues/blob/f4431f29e43d04fc32f38f4f86cea45cd4e6ae98/theblues/support.py#L51-L105
train
alphagov/performanceplatform-collector
performanceplatform/collector/gcloud/aggregate.py
get_cumulative_spend
def get_cumulative_spend(key): """ Get the sum of spending for this category up to and including the given month. """ query = ('ROUND(SUM(total_ex_vat), 2) AS total ' 'FROM {table} ' 'WHERE date <= "{year}-{month:02}-01" ' 'AND lot="{lot}" ' 'AND customer_sector="{sector}" ' 'AND supplier_type="{sme_large}"'.format( table=_RAW_SALES_TABLE, year=key.year, month=key.month, lot=key.lot, sector=key.sector, sme_large=key.sme_large)) logging.debug(query) result = scraperwiki.sqlite.select(query) logging.debug(result) value = result[0]['total'] return float(value) if value is not None else 0.0
python
def get_cumulative_spend(key): """ Get the sum of spending for this category up to and including the given month. """ query = ('ROUND(SUM(total_ex_vat), 2) AS total ' 'FROM {table} ' 'WHERE date <= "{year}-{month:02}-01" ' 'AND lot="{lot}" ' 'AND customer_sector="{sector}" ' 'AND supplier_type="{sme_large}"'.format( table=_RAW_SALES_TABLE, year=key.year, month=key.month, lot=key.lot, sector=key.sector, sme_large=key.sme_large)) logging.debug(query) result = scraperwiki.sqlite.select(query) logging.debug(result) value = result[0]['total'] return float(value) if value is not None else 0.0
[ "def", "get_cumulative_spend", "(", "key", ")", ":", "query", "=", "(", "'ROUND(SUM(total_ex_vat), 2) AS total '", "'FROM {table} '", "'WHERE date <= \"{year}-{month:02}-01\" '", "'AND lot=\"{lot}\" '", "'AND customer_sector=\"{sector}\" '", "'AND supplier_type=\"{sme_large}\"'", ".", "format", "(", "table", "=", "_RAW_SALES_TABLE", ",", "year", "=", "key", ".", "year", ",", "month", "=", "key", ".", "month", ",", "lot", "=", "key", ".", "lot", ",", "sector", "=", "key", ".", "sector", ",", "sme_large", "=", "key", ".", "sme_large", ")", ")", "logging", ".", "debug", "(", "query", ")", "result", "=", "scraperwiki", ".", "sqlite", ".", "select", "(", "query", ")", "logging", ".", "debug", "(", "result", ")", "value", "=", "result", "[", "0", "]", "[", "'total'", "]", "return", "float", "(", "result", "[", "0", "]", "[", "'total'", "]", ")", "if", "value", "is", "not", "None", "else", "0.0" ]
Get the sum of spending for this category up to and including the given month.
[ "Get", "the", "sum", "of", "spending", "for", "this", "category", "up", "to", "and", "including", "the", "given", "month", "." ]
de68ab4aa500c31e436e050fa1268fa928c522a5
https://github.com/alphagov/performanceplatform-collector/blob/de68ab4aa500c31e436e050fa1268fa928c522a5/performanceplatform/collector/gcloud/aggregate.py#L130-L151
train
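The template above starts at the column expression because scraperwiki's select helper presumably supplies the leading SELECT; the {month:02} field is what zero-pads the date. A tiny illustration with hypothetical key values:

year, month = 2014, 3
print('WHERE date <= "{year}-{month:02}-01"'.format(year=year, month=month))
# -> WHERE date <= "2014-03-01"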
DarkEnergySurvey/ugali
ugali/observation/roi.py
ROI.plot
def plot(self, value=None, pixel=None): """ Plot the ROI """ # DEPRECATED import ugali.utils.plotting map_roi = np.array(hp.UNSEEN \ * np.ones(hp.nside2npix(self.config.params['coords']['nside_pixel']))) if value is None: #map_roi[self.pixels] = ugali.utils.projector.angsep(self.lon, self.lat, self.centers_lon, self.centers_lat) map_roi[self.pixels] = 1 map_roi[self.pixels_annulus] = 0 map_roi[self.pixels_target] = 2 elif value is not None and pixel is None: map_roi[self.pixels] = value elif value is not None and pixel is not None: map_roi[pixel] = value else: logger.error("Can't parse input") ugali.utils.plotting.zoomedHealpixMap('Region of Interest', map_roi, self.lon, self.lat, self.config.params['coords']['roi_radius'])
python
def plot(self, value=None, pixel=None): """ Plot the ROI """ # DEPRECATED import ugali.utils.plotting map_roi = np.array(hp.UNSEEN \ * np.ones(hp.nside2npix(self.config.params['coords']['nside_pixel']))) if value is None: #map_roi[self.pixels] = ugali.utils.projector.angsep(self.lon, self.lat, self.centers_lon, self.centers_lat) map_roi[self.pixels] = 1 map_roi[self.pixels_annulus] = 0 map_roi[self.pixels_target] = 2 elif value is not None and pixel is None: map_roi[self.pixels] = value elif value is not None and pixel is not None: map_roi[pixel] = value else: logger.error("Can't parse input") ugali.utils.plotting.zoomedHealpixMap('Region of Interest', map_roi, self.lon, self.lat, self.config.params['coords']['roi_radius'])
[ "def", "plot", "(", "self", ",", "value", "=", "None", ",", "pixel", "=", "None", ")", ":", "# DEPRECATED", "import", "ugali", ".", "utils", ".", "plotting", "map_roi", "=", "np", ".", "array", "(", "hp", ".", "UNSEEN", "*", "np", ".", "ones", "(", "hp", ".", "nside2npix", "(", "self", ".", "config", ".", "params", "[", "'coords'", "]", "[", "'nside_pixel'", "]", ")", ")", ")", "if", "value", "is", "None", ":", "#map_roi[self.pixels] = ugali.utils.projector.angsep(self.lon, self.lat, self.centers_lon, self.centers_lat)", "map_roi", "[", "self", ".", "pixels", "]", "=", "1", "map_roi", "[", "self", ".", "pixels_annulus", "]", "=", "0", "map_roi", "[", "self", ".", "pixels_target", "]", "=", "2", "elif", "value", "is", "not", "None", "and", "pixel", "is", "None", ":", "map_roi", "[", "self", ".", "pixels", "]", "=", "value", "elif", "value", "is", "not", "None", "and", "pixel", "is", "not", "None", ":", "map_roi", "[", "pixel", "]", "=", "value", "else", ":", "logger", ".", "error", "(", "\"Can't parse input\"", ")", "ugali", ".", "utils", ".", "plotting", ".", "zoomedHealpixMap", "(", "'Region of Interest'", ",", "map_roi", ",", "self", ".", "lon", ",", "self", ".", "lat", ",", "self", ".", "config", ".", "params", "[", "'coords'", "]", "[", "'roi_radius'", "]", ")" ]
Plot the ROI
[ "Plot", "the", "ROI" ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/observation/roi.py#L135-L160
train
DarkEnergySurvey/ugali
ugali/observation/roi.py
ROI.inPixels
def inPixels(self,lon,lat,pixels): """ Function for testing if coordinates are in a set of ROI pixels. """ nside = self.config.params['coords']['nside_pixel'] return ugali.utils.healpix.in_pixels(lon,lat,pixels,nside)
python
def inPixels(self,lon,lat,pixels): """ Function for testing if coordinates are in a set of ROI pixels. """ nside = self.config.params['coords']['nside_pixel'] return ugali.utils.healpix.in_pixels(lon,lat,pixels,nside)
[ "def", "inPixels", "(", "self", ",", "lon", ",", "lat", ",", "pixels", ")", ":", "nside", "=", "self", ".", "config", ".", "params", "[", "'coords'", "]", "[", "'nside_pixel'", "]", "return", "ugali", ".", "utils", ".", "healpix", ".", "in_pixels", "(", "lon", ",", "lat", ",", "pixels", ",", "nside", ")" ]
Function for testing if coordinates are in a set of ROI pixels.
[ "Function", "for", "testing", "if", "coordintes", "in", "set", "of", "ROI", "pixels", "." ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/observation/roi.py#L163-L166
train
DarkEnergySurvey/ugali
ugali/observation/roi.py
ROI.getCatalogPixels
def getCatalogPixels(self): """ Return the catalog pixels spanned by this ROI. """ filenames = self.config.getFilenames() nside_catalog = self.config.params['coords']['nside_catalog'] nside_pixel = self.config.params['coords']['nside_pixel'] # All possible catalog pixels spanned by the ROI superpix = ugali.utils.skymap.superpixel(self.pixels,nside_pixel,nside_catalog) superpix = np.unique(superpix) # Only catalog pixels that exist in catalog files pixels = np.intersect1d(superpix, filenames['pix'].compressed()) return pixels
python
def getCatalogPixels(self): """ Return the catalog pixels spanned by this ROI. """ filenames = self.config.getFilenames() nside_catalog = self.config.params['coords']['nside_catalog'] nside_pixel = self.config.params['coords']['nside_pixel'] # All possible catalog pixels spanned by the ROI superpix = ugali.utils.skymap.superpixel(self.pixels,nside_pixel,nside_catalog) superpix = np.unique(superpix) # Only catalog pixels that exist in catalog files pixels = np.intersect1d(superpix, filenames['pix'].compressed()) return pixels
[ "def", "getCatalogPixels", "(", "self", ")", ":", "filenames", "=", "self", ".", "config", ".", "getFilenames", "(", ")", "nside_catalog", "=", "self", ".", "config", ".", "params", "[", "'coords'", "]", "[", "'nside_catalog'", "]", "nside_pixel", "=", "self", ".", "config", ".", "params", "[", "'coords'", "]", "[", "'nside_pixel'", "]", "# All possible catalog pixels spanned by the ROI", "superpix", "=", "ugali", ".", "utils", ".", "skymap", ".", "superpixel", "(", "self", ".", "pixels", ",", "nside_pixel", ",", "nside_catalog", ")", "superpix", "=", "np", ".", "unique", "(", "superpix", ")", "# Only catalog pixels that exist in catalog files", "pixels", "=", "np", ".", "intersect1d", "(", "superpix", ",", "filenames", "[", "'pix'", "]", ".", "compressed", "(", ")", ")", "return", "pixels" ]
Return the catalog pixels spanned by this ROI.
[ "Return", "the", "catalog", "pixels", "spanned", "by", "this", "ROI", "." ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/observation/roi.py#L196-L209
train
stevearc/dynamo3
dynamo3/fields.py
BaseIndex.schema
def schema(self, hash_key): """ Create the index schema Parameters ---------- hash_key : :class:`~.DynamoKey` The hash key of the table """ key_schema = [hash_key.hash_schema()] if self.range_key is not None: key_schema.append(self.range_key.range_schema()) schema_data = { 'IndexName': self.name, 'KeySchema': key_schema, 'Projection': { 'ProjectionType': self.projection_type, } } if self.include_fields is not None: schema_data['Projection']['NonKeyAttributes'] = self.include_fields return schema_data
python
def schema(self, hash_key): """ Create the index schema Parameters ---------- hash_key : :class:`~.DynamoKey` The hash key of the table """ key_schema = [hash_key.hash_schema()] if self.range_key is not None: key_schema.append(self.range_key.range_schema()) schema_data = { 'IndexName': self.name, 'KeySchema': key_schema, 'Projection': { 'ProjectionType': self.projection_type, } } if self.include_fields is not None: schema_data['Projection']['NonKeyAttributes'] = self.include_fields return schema_data
[ "def", "schema", "(", "self", ",", "hash_key", ")", ":", "key_schema", "=", "[", "hash_key", ".", "hash_schema", "(", ")", "]", "if", "self", ".", "range_key", "is", "not", "None", ":", "key_schema", ".", "append", "(", "self", ".", "range_key", ".", "range_schema", "(", ")", ")", "schema_data", "=", "{", "'IndexName'", ":", "self", ".", "name", ",", "'KeySchema'", ":", "key_schema", ",", "'Projection'", ":", "{", "'ProjectionType'", ":", "self", ".", "projection_type", ",", "}", "}", "if", "self", ".", "include_fields", "is", "not", "None", ":", "schema_data", "[", "'Projection'", "]", "[", "'NonKeyAttributes'", "]", "=", "self", ".", "include_fields", "return", "schema_data" ]
Create the index schema Parameters ---------- hash_key : :class:`~.DynamoKey` The hash key of the table
[ "Create", "the", "index", "schema" ]
f897c40ece28586272dbcab8f0d99a14a1831dda
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/fields.py#L76-L98
train
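For readers skimming the tokens, here is a hedged sketch of the wire-format dict schema() assembles for a hash-and-range index that projects a whitelist of extra attributes; the attribute and index names are invented for illustration, and the HASH/RANGE entries stand in for whatever hash_schema() and range_schema() emit.

# Shape only, with hypothetical names ('user', 'ts', 'body'):
expected_shape = {
    'IndexName': 'by-ts',
    'KeySchema': [
        {'AttributeName': 'user', 'KeyType': 'HASH'},   # hash_key.hash_schema()
        {'AttributeName': 'ts', 'KeyType': 'RANGE'},    # range_key.range_schema()
    ],
    'Projection': {
        'ProjectionType': 'INCLUDE',
        'NonKeyAttributes': ['body'],  # only emitted when include_fields is set
    },
}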
stevearc/dynamo3
dynamo3/fields.py
GlobalIndex.all
def all(cls, name, hash_key, range_key=None, throughput=None): """ Create an index that projects all attributes """ return cls(cls.ALL, name, hash_key, range_key, throughput=throughput)
python
def all(cls, name, hash_key, range_key=None, throughput=None): """ Create an index that projects all attributes """ return cls(cls.ALL, name, hash_key, range_key, throughput=throughput)
[ "def", "all", "(", "cls", ",", "name", ",", "hash_key", ",", "range_key", "=", "None", ",", "throughput", "=", "None", ")", ":", "return", "cls", "(", "cls", ".", "ALL", ",", "name", ",", "hash_key", ",", "range_key", ",", "throughput", "=", "throughput", ")" ]
Create an index that projects all attributes
[ "Create", "an", "index", "that", "projects", "all", "attributes" ]
f897c40ece28586272dbcab8f0d99a14a1831dda
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/fields.py#L174-L176
train
stevearc/dynamo3
dynamo3/fields.py
GlobalIndex.keys
def keys(cls, name, hash_key, range_key=None, throughput=None): """ Create an index that projects only key attributes """ return cls(cls.KEYS, name, hash_key, range_key, throughput=throughput)
python
def keys(cls, name, hash_key, range_key=None, throughput=None): """ Create an index that projects only key attributes """ return cls(cls.KEYS, name, hash_key, range_key, throughput=throughput)
[ "def", "keys", "(", "cls", ",", "name", ",", "hash_key", ",", "range_key", "=", "None", ",", "throughput", "=", "None", ")", ":", "return", "cls", "(", "cls", ".", "KEYS", ",", "name", ",", "hash_key", ",", "range_key", ",", "throughput", "=", "throughput", ")" ]
Create an index that projects only key attributes
[ "Create", "an", "index", "that", "projects", "only", "key", "attributes" ]
f897c40ece28586272dbcab8f0d99a14a1831dda
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/fields.py#L179-L182
train
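The two factory classmethods above differ only in the projection type they pass to the constructor. A usage sketch, assuming DynamoKey takes (name, data_type) positionally as in Table.from_response below, and that both names are importable from the dynamo3 package:

from dynamo3 import DynamoKey, GlobalIndex  # import path is an assumption

user = DynamoKey('userid', 'S')
score = DynamoKey('score', 'N')

full = GlobalIndex.all('score-index', user, score)   # projects every attribute
slim = GlobalIndex.keys('score-keys', user, score)   # projects key attributes only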
stevearc/dynamo3
dynamo3/fields.py
GlobalIndex.schema
def schema(self): """ Construct the schema definition for this index """ schema_data = super(GlobalIndex, self).schema(self.hash_key) schema_data['ProvisionedThroughput'] = self.throughput.schema() return schema_data
python
def schema(self): """ Construct the schema definition for this index """ schema_data = super(GlobalIndex, self).schema(self.hash_key) schema_data['ProvisionedThroughput'] = self.throughput.schema() return schema_data
[ "def", "schema", "(", "self", ")", ":", "schema_data", "=", "super", "(", "GlobalIndex", ",", "self", ")", ".", "schema", "(", "self", ".", "hash_key", ")", "schema_data", "[", "'ProvisionedThroughput'", "]", "=", "self", ".", "throughput", ".", "schema", "(", ")", "return", "schema_data" ]
Construct the schema definition for this index
[ "Construct", "the", "schema", "definition", "for", "this", "index" ]
f897c40ece28586272dbcab8f0d99a14a1831dda
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/fields.py#L191-L195
train
stevearc/dynamo3
dynamo3/fields.py
Table.from_response
def from_response(cls, response): """ Create a Table from returned Dynamo data """ hash_key = None range_key = None # KeySchema may not be in the response if the TableStatus is DELETING. if 'KeySchema' in response: attrs = dict(((d['AttributeName'], DynamoKey(d['AttributeName'], d['AttributeType'])) for d in response['AttributeDefinitions'])) hash_key = attrs[response['KeySchema'][0]['AttributeName']] if len(response['KeySchema']) > 1: range_key = attrs[response['KeySchema'][1]['AttributeName']] indexes = [] for idx in response.get('LocalSecondaryIndexes', []): indexes.append(LocalIndex.from_response(idx, attrs)) global_indexes = [] for idx in response.get('GlobalSecondaryIndexes', []): global_indexes.append(GlobalIndex.from_response(idx, attrs)) table = cls( name=response['TableName'], hash_key=hash_key, range_key=range_key, indexes=indexes, global_indexes=global_indexes, throughput=Throughput.from_response( response['ProvisionedThroughput']), status=response['TableStatus'], size=response['TableSizeBytes'], ) table.response = response return table
python
def from_response(cls, response): """ Create a Table from returned Dynamo data """ hash_key = None range_key = None # KeySchema may not be in the response if the TableStatus is DELETING. if 'KeySchema' in response: attrs = dict(((d['AttributeName'], DynamoKey(d['AttributeName'], d['AttributeType'])) for d in response['AttributeDefinitions'])) hash_key = attrs[response['KeySchema'][0]['AttributeName']] if len(response['KeySchema']) > 1: range_key = attrs[response['KeySchema'][1]['AttributeName']] indexes = [] for idx in response.get('LocalSecondaryIndexes', []): indexes.append(LocalIndex.from_response(idx, attrs)) global_indexes = [] for idx in response.get('GlobalSecondaryIndexes', []): global_indexes.append(GlobalIndex.from_response(idx, attrs)) table = cls( name=response['TableName'], hash_key=hash_key, range_key=range_key, indexes=indexes, global_indexes=global_indexes, throughput=Throughput.from_response( response['ProvisionedThroughput']), status=response['TableStatus'], size=response['TableSizeBytes'], ) table.response = response return table
[ "def", "from_response", "(", "cls", ",", "response", ")", ":", "hash_key", "=", "None", "range_key", "=", "None", "# KeySchema may not be in the response if the TableStatus is DELETING.", "if", "'KeySchema'", "in", "response", ":", "attrs", "=", "dict", "(", "(", "(", "d", "[", "'AttributeName'", "]", ",", "DynamoKey", "(", "d", "[", "'AttributeName'", "]", ",", "d", "[", "'AttributeType'", "]", ")", ")", "for", "d", "in", "response", "[", "'AttributeDefinitions'", "]", ")", ")", "hash_key", "=", "attrs", "[", "response", "[", "'KeySchema'", "]", "[", "0", "]", "[", "'AttributeName'", "]", "]", "if", "len", "(", "response", "[", "'KeySchema'", "]", ")", ">", "1", ":", "range_key", "=", "attrs", "[", "response", "[", "'KeySchema'", "]", "[", "1", "]", "[", "'AttributeName'", "]", "]", "indexes", "=", "[", "]", "for", "idx", "in", "response", ".", "get", "(", "'LocalSecondaryIndexes'", ",", "[", "]", ")", ":", "indexes", ".", "append", "(", "LocalIndex", ".", "from_response", "(", "idx", ",", "attrs", ")", ")", "global_indexes", "=", "[", "]", "for", "idx", "in", "response", ".", "get", "(", "'GlobalSecondaryIndexes'", ",", "[", "]", ")", ":", "global_indexes", ".", "append", "(", "GlobalIndex", ".", "from_response", "(", "idx", ",", "attrs", ")", ")", "table", "=", "cls", "(", "name", "=", "response", "[", "'TableName'", "]", ",", "hash_key", "=", "hash_key", ",", "range_key", "=", "range_key", ",", "indexes", "=", "indexes", ",", "global_indexes", "=", "global_indexes", ",", "throughput", "=", "Throughput", ".", "from_response", "(", "response", "[", "'ProvisionedThroughput'", "]", ")", ",", "status", "=", "response", "[", "'TableStatus'", "]", ",", "size", "=", "response", "[", "'TableSizeBytes'", "]", ",", ")", "table", ".", "response", "=", "response", "return", "table" ]
Create a Table from returned Dynamo data
[ "Create", "a", "Table", "from", "returned", "Dynamo", "data" ]
f897c40ece28586272dbcab8f0d99a14a1831dda
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/fields.py#L296-L328
train
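A trimmed, DescribeTable-shaped payload is enough to exercise the happy path; the values below are illustrative, and only the fields the method actually reads are included.

response = {
    'TableName': 'users',
    'AttributeDefinitions': [
        {'AttributeName': 'userid', 'AttributeType': 'S'},
        {'AttributeName': 'ts', 'AttributeType': 'N'},
    ],
    'KeySchema': [
        {'AttributeName': 'userid', 'KeyType': 'HASH'},
        {'AttributeName': 'ts', 'KeyType': 'RANGE'},
    ],
    'ProvisionedThroughput': {'ReadCapacityUnits': 5, 'WriteCapacityUnits': 5},
    'TableStatus': 'ACTIVE',
    'TableSizeBytes': 0,
}
table = Table.from_response(response)  # table.hash_key / table.range_key populated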
stevearc/dynamo3
dynamo3/fields.py
IndexUpdate.serialize
def serialize(self): """ Get the serialized Dynamo format for the update """ if self.action == 'Create': payload = self.extra['index'].schema() else: payload = { 'IndexName': self.index_name, } if self.action == 'Update': payload['ProvisionedThroughput'] = \ self.extra['throughput'].schema() return { self.action: payload }
python
def serialize(self): """ Get the serialized Dynamo format for the update """ if self.action == 'Create': payload = self.extra['index'].schema() else: payload = { 'IndexName': self.index_name, } if self.action == 'Update': payload['ProvisionedThroughput'] = \ self.extra['throughput'].schema() return { self.action: payload }
[ "def", "serialize", "(", "self", ")", ":", "if", "self", ".", "action", "==", "'Create'", ":", "payload", "=", "self", ".", "extra", "[", "'index'", "]", ".", "schema", "(", ")", "else", ":", "payload", "=", "{", "'IndexName'", ":", "self", ".", "index_name", ",", "}", "if", "self", ".", "action", "==", "'Update'", ":", "payload", "[", "'ProvisionedThroughput'", "]", "=", "self", ".", "extra", "[", "'throughput'", "]", ".", "schema", "(", ")", "return", "{", "self", ".", "action", ":", "payload", "}" ]
Get the serialized Dynamo format for the update
[ "Get", "the", "serialized", "Dynamo", "format", "for", "the", "update" ]
f897c40ece28586272dbcab8f0d99a14a1831dda
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/fields.py#L388-L401
train
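A hedged sketch of the three payload shapes serialize() can emit, keyed by the action verb; the index names and throughput numbers are invented.

delete_payload = {'Delete': {'IndexName': 'old-index'}}
update_payload = {'Update': {'IndexName': 'hot-index',
                             'ProvisionedThroughput': {'ReadCapacityUnits': 10,
                                                       'WriteCapacityUnits': 5}}}
# For a 'Create' action the payload is the full index schema() dict shown earlier.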
delicb/mvvm
mvvm.py
_Messenger.instance
def instance(cls, interval=5): ''' Returns existing instance of messenger. If one does not exist it will be created and returned. :param int interval: Number of milliseconds that represents interval when messages will be processed. Note that this parameter will be used only the first time when instance is requested, every other time it will be ignored because existing instance of :class:`._Messenger` is returned. ''' if not cls._instance: cls._instance = _Messenger(interval) return cls._instance
python
def instance(cls, interval=5): ''' Returns existing instance of messenger. If one does not exist it will be created and returned. :param int interval: Number of milliseconds that represents interval when messages will be processed. Note that this parameter will be used only the first time when instance is requested, every other time it will be ignored because existing instance of :class:`._Messenger` is returned. ''' if not cls._instance: cls._instance = _Messenger(interval) return cls._instance
[ "def", "instance", "(", "cls", ",", "interval", "=", "5", ")", ":", "if", "not", "cls", ".", "_instance", ":", "cls", ".", "_instance", "=", "_Messenger", "(", "interval", ")", "return", "cls", ".", "_instance" ]
Returns existing instance of messenger. If one does not exist it will be created and returned. :param int interval: Number of milliseconds that represents interval when messages will be processed. Note that this parameter will be used only the first time when instance is requested, every other time it will be ignored because existing instance of :class:`._Messenger` is returned.
[ "Returns", "existing", "instance", "of", "messenger", ".", "If", "one", "does", "not", "exist", "it", "will", "be", "created", "and", "returned", "." ]
29bf0ab2cc0835b58bed75b2606a9b380c38a272
https://github.com/delicb/mvvm/blob/29bf0ab2cc0835b58bed75b2606a9b380c38a272/mvvm.py#L87-L101
train
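This is the usual lazy singleton, and the docstring's caveat about interval is worth seeing concretely. A minimal sketch, assuming the surrounding mvvm module is importable:

m1 = _Messenger.instance(interval=10)  # first call: creates the instance
m2 = _Messenger.instance(interval=50)  # later calls: interval silently ignored
assert m1 is m2                        # one shared messenger per process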
delicb/mvvm
mvvm.py
_Messenger.send
def send(self, message, *args, **kwargs): ''' Sends provided message to all listeners. Message is only added to queue and will be processed on next tick. :param Message message: Message to send. ''' self._messages.put((message, args, kwargs), False)
python
def send(self, message, *args, **kwargs): ''' Sends provided message to all listeners. Message is only added to queue and will be processed on next tick. :param Message message: Message to send. ''' self._messages.put((message, args, kwargs), False)
[ "def", "send", "(", "self", ",", "message", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_messages", ".", "put", "(", "(", "message", ",", "args", ",", "kwargs", ")", ",", "False", ")" ]
Sends provided message to all listeners. Message is only added to queue and will be processed on next tick. :param Message message: Message to send.
[ "Sends", "provided", "message", "to", "all", "listeners", ".", "Message", "is", "only", "added", "to", "queue", "and", "will", "be", "processed", "on", "next", "tick", "." ]
29bf0ab2cc0835b58bed75b2606a9b380c38a272
https://github.com/delicb/mvvm/blob/29bf0ab2cc0835b58bed75b2606a9b380c38a272/mvvm.py#L117-L125
train
delicb/mvvm
mvvm.py
_Messenger.subscribe
def subscribe(self, message, handler): ''' Adds handler for specified message. :param str message: Name of message to subscribe to. :param callable handler: Handler for this message type. Handler must receive single parameter and that parameter will be instance of sent message. ''' with self._lock: ref = WeakCallable(handler, self._on_collect) self._subscribers[message].append(ref)
python
def subscribe(self, message, handler): ''' Adds handler for specified message. :param str message: Name of message to subscribe to. :param callable handler: Handler for this message type. Handler must receive single parameter and that parameter will be instance of sent message. ''' with self._lock: ref = WeakCallable(handler, self._on_collect) self._subscribers[message].append(ref)
[ "def", "subscribe", "(", "self", ",", "message", ",", "handler", ")", ":", "with", "self", ".", "_lock", ":", "ref", "=", "WeakCallable", "(", "handler", ",", "self", ".", "_on_collect", ")", "self", ".", "_subscribers", "[", "message", "]", ".", "append", "(", "ref", ")" ]
Adds handler for specified message. :param str message: Name of message to subscribe to. :param callable handler: Handler for this message type. Handler must receive single parameter and that parameter will be instance of sent message.
[ "Adds", "hander", "for", "specified", "message", "." ]
29bf0ab2cc0835b58bed75b2606a9b380c38a272
https://github.com/delicb/mvvm/blob/29bf0ab2cc0835b58bed75b2606a9b380c38a272/mvvm.py#L127-L140
train
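Putting send() and subscribe() together: because the handler is stored behind a WeakCallable, it must stay referenced somewhere (a module-level function, a bound method on a live object) or delivery stops silently. A round-trip sketch:

def on_saved(path):
    print('saved', path)

messenger = _Messenger.instance()
messenger.subscribe('document-saved', on_saved)  # weakly held handler
messenger.send('document-saved', 'report.txt')   # queued; runs on the next tick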
delicb/mvvm
mvvm.py
_Messenger.unsubscribe
def unsubscribe(self, message, handler): ''' Removes handler from message listeners. :param str message: Name of message to unsubscribe handler from. :param callable handler: Callable that should be removed as handler for `message`. ''' with self._lock: self._subscribers[message].remove(WeakCallable(handler))
python
def unsubscribe(self, message, handler): ''' Removes handler from message listeners. :param str message: Name of message to unsubscribe handler from. :param callable handler: Callable that should be removed as handler for `message`. ''' with self._lock: self._subscribers[message].remove(WeakCallable(handler))
[ "def", "unsubscribe", "(", "self", ",", "message", ",", "handler", ")", ":", "with", "self", ".", "_lock", ":", "self", ".", "_subscribers", "[", "message", "]", ".", "remove", "(", "WeakCallable", "(", "handler", ")", ")" ]
Removes handler from message listeners. :param str message: Name of message to unsubscribe handler from. :param callable handler: Callable that should be removed as handler for `message`.
[ "Removes", "handler", "from", "message", "listeners", "." ]
29bf0ab2cc0835b58bed75b2606a9b380c38a272
https://github.com/delicb/mvvm/blob/29bf0ab2cc0835b58bed75b2606a9b380c38a272/mvvm.py#L143-L154
train
delicb/mvvm
mvvm.py
_Messenger._execute
def _execute(self, sender, event_args): ''' Event handler for timer that processes all queued messages. ''' with self._lock: while not self._messages.empty(): msg, args, kwargs = self._messages.get(False) for subscriber in self._subscribers[msg]: try: subscriber(*args, **kwargs) except weakref.ReferenceError: # Reference to handler is lost and it is OK to silence it pass
python
def _execute(self, sender, event_args): ''' Event handler for timer that processes all queued messages. ''' with self._lock: while not self._messages.empty(): msg, args, kwargs = self._messages.get(False) for subscriber in self._subscribers[msg]: try: subscriber(*args, **kwargs) except weakref.ReferenceError: # Reference to handler is lost and it is OK to silence it pass
[ "def", "_execute", "(", "self", ",", "sender", ",", "event_args", ")", ":", "with", "self", ".", "_lock", ":", "while", "not", "self", ".", "_messages", ".", "empty", "(", ")", ":", "msg", ",", "args", ",", "kwargs", "=", "self", ".", "_messages", ".", "get", "(", "False", ")", "for", "subscriber", "in", "self", ".", "_subscribers", "[", "msg", "]", ":", "try", ":", "subscriber", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "weakref", ".", "ReferenceError", ":", "# Reference to handler is lost and it is OK to silence it", "pass" ]
Event handler for timer that processes all queued messages.
[ "Event", "handler", "for", "timer", "that", "processes", "all", "queued", "messages", "." ]
29bf0ab2cc0835b58bed75b2606a9b380c38a272
https://github.com/delicb/mvvm/blob/29bf0ab2cc0835b58bed75b2606a9b380c38a272/mvvm.py#L156-L168
train
delicb/mvvm
mvvm.py
Signal.emit
def emit(self, *args, **kwargs): ''' Emits this signal. As a result, all handlers will be invoked. ''' self._messanger.send(self, *args, **kwargs)
python
def emit(self, *args, **kwargs): ''' Emits this signal. As a result, all handlers will be invoked. ''' self._messanger.send(self, *args, **kwargs)
[ "def", "emit", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_messanger", ".", "send", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Emits this signal. As a result, all handlers will be invoked.
[ "Emits", "this", "signal", ".", "As", "result", "all", "handlers", "will", "be", "invoked", "." ]
29bf0ab2cc0835b58bed75b2606a9b380c38a272
https://github.com/delicb/mvvm/blob/29bf0ab2cc0835b58bed75b2606a9b380c38a272/mvvm.py#L204-L208
train
delicb/mvvm
mvvm.py
ViewModel.RaisePropertyChanged
def RaisePropertyChanged(self, property_name): ''' Raises event that property value has changed for provided property name. :param str property_name: Name of property whose value has changed. ''' args = PropertyChangedEventArgs(property_name) for handler in self.property_chaged_handlers: handler(self, args)
python
def RaisePropertyChanged(self, property_name): ''' Raises event that property value has changed for provided property name. :param str property_name: Name of property whose value has changed. ''' args = PropertyChangedEventArgs(property_name) for handler in self.property_chaged_handlers: handler(self, args)
[ "def", "RaisePropertyChanged", "(", "self", ",", "property_name", ")", ":", "args", "=", "PropertyChangedEventArgs", "(", "property_name", ")", "for", "handler", "in", "self", ".", "property_chaged_handlers", ":", "handler", "(", "self", ",", "args", ")" ]
Raises event that property value has changed for provided property name. :param str property_name: Name of property whose value has changed.
[ "Raises", "event", "that", "property", "value", "has", "changed", "for", "provided", "property", "name", "." ]
29bf0ab2cc0835b58bed75b2606a9b380c38a272
https://github.com/delicb/mvvm/blob/29bf0ab2cc0835b58bed75b2606a9b380c38a272/mvvm.py#L350-L359
train
totalgood/pugnlp
src/pugnlp/futil.py
walk_level
def walk_level(path, level=1): """Like os.walk, but takes `level` kwarg that indicates how deep the recursion will go. Notes: TODO: refactor `level`->`depth` References: http://stackoverflow.com/a/234329/623735 Args: path (str): Root path to begin file tree traversal (walk) level (int, optional): Depth of file tree to halt recursion at. None = full recursion to as deep as it goes 0 = nonrecursive, just provide a list of files at the root level of the tree 1 = one level of depth deeper in the tree Examples: >>> root = os.path.dirname(__file__) >>> all((os.path.join(base,d).count('/') == (root.count('/')+1)) ... for (base, dirs, files) in walk_level(root, level=0) for d in dirs) True """ if level is None: level = float('inf') path = expand_path(path) if os.path.isdir(path): root_level = path.count(os.path.sep) for root, dirs, files in os.walk(path): yield root, dirs, files if root.count(os.path.sep) >= root_level + level: del dirs[:] elif os.path.isfile(path): yield os.path.dirname(path), [], [os.path.basename(path)] else: raise RuntimeError("Can't find a valid folder or file for path {0}".format(repr(path)))
python
def walk_level(path, level=1): """Like os.walk, but takes `level` kwarg that indicates how deep the recursion will go. Notes: TODO: refactor `level`->`depth` References: http://stackoverflow.com/a/234329/623735 Args: path (str): Root path to begin file tree traversal (walk) level (int, optional): Depth of file tree to halt recursion at. None = full recursion to as deep as it goes 0 = nonrecursive, just provide a list of files at the root level of the tree 1 = one level of depth deeper in the tree Examples: >>> root = os.path.dirname(__file__) >>> all((os.path.join(base,d).count('/') == (root.count('/')+1)) ... for (base, dirs, files) in walk_level(root, level=0) for d in dirs) True """ if level is None: level = float('inf') path = expand_path(path) if os.path.isdir(path): root_level = path.count(os.path.sep) for root, dirs, files in os.walk(path): yield root, dirs, files if root.count(os.path.sep) >= root_level + level: del dirs[:] elif os.path.isfile(path): yield os.path.dirname(path), [], [os.path.basename(path)] else: raise RuntimeError("Can't find a valid folder or file for path {0}".format(repr(path)))
[ "def", "walk_level", "(", "path", ",", "level", "=", "1", ")", ":", "if", "level", "is", "None", ":", "level", "=", "float", "(", "'inf'", ")", "path", "=", "expand_path", "(", "path", ")", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "root_level", "=", "path", ".", "count", "(", "os", ".", "path", ".", "sep", ")", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "path", ")", ":", "yield", "root", ",", "dirs", ",", "files", "if", "root", ".", "count", "(", "os", ".", "path", ".", "sep", ")", ">=", "root_level", "+", "level", ":", "del", "dirs", "[", ":", "]", "elif", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "yield", "os", ".", "path", ".", "dirname", "(", "path", ")", ",", "[", "]", ",", "[", "os", ".", "path", ".", "basename", "(", "path", ")", "]", "else", ":", "raise", "RuntimeError", "(", "\"Can't find a valid folder or file for path {0}\"", ".", "format", "(", "repr", "(", "path", ")", ")", ")" ]
Like os.walk, but takes `level` kwarg that indicates how deep the recursion will go. Notes: TODO: refactor `level`->`depth` References: http://stackoverflow.com/a/234329/623735 Args: path (str): Root path to begin file tree traversal (walk) level (int, optional): Depth of file tree to halt recursion at. None = full recursion to as deep as it goes 0 = nonrecursive, just provide a list of files at the root level of the tree 1 = one level of depth deeper in the tree Examples: >>> root = os.path.dirname(__file__) >>> all((os.path.join(base,d).count('/') == (root.count('/')+1)) ... for (base, dirs, files) in walk_level(root, level=0) for d in dirs) True
[ "Like", "os", ".", "walk", "but", "takes", "level", "kwarg", "that", "indicates", "how", "deep", "the", "recursion", "will", "go", "." ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/futil.py#L45-L79
train
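The depth limiting works by pruning dirs in place (del dirs[:]), which os.walk honours on the next iteration. A quick sketch beyond the doctest:

import os

# List each directory at most one level below the current one.
for base, dirs, files in walk_level(os.curdir, level=1):
    print(base, '->', sorted(dirs))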
totalgood/pugnlp
src/pugnlp/futil.py
get_stat
def get_stat(full_path): """Use python builtin equivalents to unix `stat` command and return dict containing stat data about a file""" status = {} status['size'] = os.path.getsize(full_path) status['accessed'] = datetime.datetime.fromtimestamp(os.path.getatime(full_path)) status['modified'] = datetime.datetime.fromtimestamp(os.path.getmtime(full_path)) status['changed_any'] = datetime.datetime.fromtimestamp(os.path.getctime(full_path)) # first 3 digits are User, Group, Other permissions: 1=execute,2=write,4=read status['mode'] = os.stat(full_path).st_mode status['type'] = get_type(full_path) return status
python
def get_stat(full_path): """Use python builtin equivalents to unix `stat` command and return dict containing stat data about a file""" status = {} status['size'] = os.path.getsize(full_path) status['accessed'] = datetime.datetime.fromtimestamp(os.path.getatime(full_path)) status['modified'] = datetime.datetime.fromtimestamp(os.path.getmtime(full_path)) status['changed_any'] = datetime.datetime.fromtimestamp(os.path.getctime(full_path)) # first 3 digits are User, Group, Other permissions: 1=execute,2=write,4=read status['mode'] = os.stat(full_path).st_mode status['type'] = get_type(full_path) return status
[ "def", "get_stat", "(", "full_path", ")", ":", "status", "=", "{", "}", "status", "[", "'size'", "]", "=", "os", ".", "path", ".", "getsize", "(", "full_path", ")", "status", "[", "'accessed'", "]", "=", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "os", ".", "path", ".", "getatime", "(", "full_path", ")", ")", "status", "[", "'modified'", "]", "=", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "os", ".", "path", ".", "getmtime", "(", "full_path", ")", ")", "status", "[", "'changed_any'", "]", "=", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "os", ".", "path", ".", "getctime", "(", "full_path", ")", ")", "# first 3 digits are User, Group, Other permissions: 1=execute,2=write,4=read", "status", "[", "'mode'", "]", "=", "os", ".", "stat", "(", "full_path", ")", ".", "st_mode", "status", "[", "'type'", "]", "=", "get_type", "(", "full_path", ")", "return", "status" ]
Use python builtin equivalents to unix `stat` command and return dict containing stat data about a file
[ "Use", "python", "builtin", "equivalents", "to", "unix", "stat", "command", "and", "return", "dict", "containing", "stat", "data", "about", "a", "file" ]
c43445b14afddfdeadc5f3076675c9e8fc1ee67c
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/futil.py#L109-L119
train
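The 'mode' entry is the raw st_mode integer, so the standard library can render it ls-style. A small sketch against any path that exists on disk:

import stat

info = get_stat('setup.py')          # any existing path
print(info['size'], info['modified'])
print(stat.filemode(info['mode']))   # e.g. '-rw-r--r--'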
DarkEnergySurvey/ugali
ugali/preprocess/maglims.py
split
def split(config,dirname='split',force=False): """ Take a pre-existing maglim map and divide it into chunks consistent with the catalog pixels. """ config = Config(config) filenames = config.getFilenames() #healpix = filenames['pix'].compressed() # Check that things are ok basedir,basename = os.path.split(config['mask']['dirname']) #if basename == dirname: # msg = "Input and output directory are the same." # raise Exception(msg) outdir = mkdir(os.path.join(basedir,dirname)) nside_catalog = config['coords']['nside_catalog'] nside_pixel = config['coords']['nside_pixel'] release = config['data']['release'].lower() band1 = config['catalog']['mag_1_band'] band2 = config['catalog']['mag_2_band'] # Read the magnitude limits maglimdir = config['maglim']['dirname'] maglimfile_1 = join(maglimdir,config['maglim']['filename_1']) logger.info("Reading %s..."%maglimfile_1) maglim1 = read_map(maglimfile_1) maglimfile_2 = join(maglimdir,config['maglim']['filename_2']) logger.info("Reading %s..."%maglimfile_2) maglim2 = read_map(maglimfile_2) # Read the footprint footfile = config['data']['footprint'] logger.info("Reading %s..."%footfile) footprint = read_map(footfile) # Output mask names mask1 = os.path.basename(config['mask']['basename_1']) mask2 = os.path.basename(config['mask']['basename_2']) for band,maglim,base in [(band1,maglim1,mask1),(band2,maglim2,mask2)]: nside_maglim = hp.npix2nside(len(maglim)) if nside_maglim != nside_pixel: msg = "Mask nside different from pixel nside" logger.warning(msg) #raise Exception(msg) pixels = np.nonzero(maglim>0)[0] superpix = superpixel(pixels,nside_maglim,nside_catalog) healpix = np.unique(superpix) for hpx in healpix: outfile = join(outdir,base)%hpx if os.path.exists(outfile) and not force: logger.warning("Found %s; skipping..."%outfile) continue pix = pixels[superpix == hpx] print(hpx, len(pix)) logger.info('Writing %s...'%outfile) data = odict() data['PIXEL']=pix data['MAGLIM']=maglim[pix].astype('f4') data['FRACDET']=footprint[pix].astype('f4') ugali.utils.healpix.write_partial_map(outfile,data,nside_pixel)
python
def split(config,dirname='split',force=False): """ Take a pre-existing maglim map and divide it into chunks consistent with the catalog pixels. """ config = Config(config) filenames = config.getFilenames() #healpix = filenames['pix'].compressed() # Check that things are ok basedir,basename = os.path.split(config['mask']['dirname']) #if basename == dirname: # msg = "Input and output directory are the same." # raise Exception(msg) outdir = mkdir(os.path.join(basedir,dirname)) nside_catalog = config['coords']['nside_catalog'] nside_pixel = config['coords']['nside_pixel'] release = config['data']['release'].lower() band1 = config['catalog']['mag_1_band'] band2 = config['catalog']['mag_2_band'] # Read the magnitude limits maglimdir = config['maglim']['dirname'] maglimfile_1 = join(maglimdir,config['maglim']['filename_1']) logger.info("Reading %s..."%maglimfile_1) maglim1 = read_map(maglimfile_1) maglimfile_2 = join(maglimdir,config['maglim']['filename_2']) logger.info("Reading %s..."%maglimfile_2) maglim2 = read_map(maglimfile_2) # Read the footprint footfile = config['data']['footprint'] logger.info("Reading %s..."%footfile) footprint = read_map(footfile) # Output mask names mask1 = os.path.basename(config['mask']['basename_1']) mask2 = os.path.basename(config['mask']['basename_2']) for band,maglim,base in [(band1,maglim1,mask1),(band2,maglim2,mask2)]: nside_maglim = hp.npix2nside(len(maglim)) if nside_maglim != nside_pixel: msg = "Mask nside different from pixel nside" logger.warning(msg) #raise Exception(msg) pixels = np.nonzero(maglim>0)[0] superpix = superpixel(pixels,nside_maglim,nside_catalog) healpix = np.unique(superpix) for hpx in healpix: outfile = join(outdir,base)%hpx if os.path.exists(outfile) and not force: logger.warning("Found %s; skipping..."%outfile) continue pix = pixels[superpix == hpx] print(hpx, len(pix)) logger.info('Writing %s...'%outfile) data = odict() data['PIXEL']=pix data['MAGLIM']=maglim[pix].astype('f4') data['FRACDET']=footprint[pix].astype('f4') ugali.utils.healpix.write_partial_map(outfile,data,nside_pixel)
[ "def", "split", "(", "config", ",", "dirname", "=", "'split'", ",", "force", "=", "False", ")", ":", "config", "=", "Config", "(", "config", ")", "filenames", "=", "config", ".", "getFilenames", "(", ")", "#healpix = filenames['pix'].compressed()", "# Check that things are ok", "basedir", ",", "basename", "=", "os", ".", "path", ".", "split", "(", "config", "[", "'mask'", "]", "[", "'dirname'", "]", ")", "#if basename == dirname:", "# msg = \"Input and output directory are the same.\"", "# raise Exception(msg)", "outdir", "=", "mkdir", "(", "os", ".", "path", ".", "join", "(", "basedir", ",", "dirname", ")", ")", "nside_catalog", "=", "config", "[", "'coords'", "]", "[", "'nside_catalog'", "]", "nside_pixel", "=", "config", "[", "'coords'", "]", "[", "'nside_pixel'", "]", "release", "=", "config", "[", "'data'", "]", "[", "'release'", "]", ".", "lower", "(", ")", "band1", "=", "config", "[", "'catalog'", "]", "[", "'mag_1_band'", "]", "band2", "=", "config", "[", "'catalog'", "]", "[", "'mag_2_band'", "]", "# Read the magnitude limits", "maglimdir", "=", "config", "[", "'maglim'", "]", "[", "'dirname'", "]", "maglimfile_1", "=", "join", "(", "maglimdir", ",", "config", "[", "'maglim'", "]", "[", "'filename_1'", "]", ")", "logger", ".", "info", "(", "\"Reading %s...\"", "%", "maglimfile_1", ")", "maglim1", "=", "read_map", "(", "maglimfile_1", ")", "maglimfile_2", "=", "join", "(", "maglimdir", ",", "config", "[", "'maglim'", "]", "[", "'filename_2'", "]", ")", "logger", ".", "info", "(", "\"Reading %s...\"", "%", "maglimfile_2", ")", "maglim2", "=", "read_map", "(", "maglimfile_2", ")", "# Read the footprint", "footfile", "=", "config", "[", "'data'", "]", "[", "'footprint'", "]", "logger", ".", "info", "(", "\"Reading %s...\"", "%", "footfile", ")", "footprint", "=", "read_map", "(", "footfile", ")", "# Output mask names", "mask1", "=", "os", ".", "path", ".", "basename", "(", "config", "[", "'mask'", "]", "[", "'basename_1'", "]", ")", "mask2", "=", "os", ".", "path", ".", "basename", "(", "config", "[", "'mask'", "]", "[", "'basename_2'", "]", ")", "for", "band", ",", "maglim", ",", "base", "in", "[", "(", "band1", ",", "maglim1", ",", "mask1", ")", ",", "(", "band2", ",", "maglim2", ",", "mask2", ")", "]", ":", "nside_maglim", "=", "hp", ".", "npix2nside", "(", "len", "(", "maglim", ")", ")", "if", "nside_maglim", "!=", "nside_pixel", ":", "msg", "=", "\"Mask nside different from pixel nside\"", "logger", ".", "warning", "(", "msg", ")", "#raise Exception(msg)", "pixels", "=", "np", ".", "nonzero", "(", "maglim", ">", "0", ")", "[", "0", "]", "superpix", "=", "superpixel", "(", "pixels", ",", "nside_maglim", ",", "nside_catalog", ")", "healpix", "=", "np", ".", "unique", "(", "superpix", ")", "for", "hpx", "in", "healpix", ":", "outfile", "=", "join", "(", "outdir", ",", "base", ")", "%", "hpx", "if", "os", ".", "path", ".", "exists", "(", "outfile", ")", "and", "not", "force", ":", "logger", ".", "warning", "(", "\"Found %s; skipping...\"", "%", "outfile", ")", "continue", "pix", "=", "pixels", "[", "superpix", "==", "hpx", "]", "print", "(", "hpx", ",", "len", "(", "pix", ")", ")", "logger", ".", "info", "(", "'Writing %s...'", "%", "outfile", ")", "data", "=", "odict", "(", ")", "data", "[", "'PIXEL'", "]", "=", "pix", "data", "[", "'MAGLIM'", "]", "=", "maglim", "[", "pix", "]", ".", "astype", "(", "'f4'", ")", "data", "[", "'FRACDET'", "]", "=", "footprint", "[", "pix", "]", ".", "astype", "(", "'f4'", ")", "ugali", ".", "utils", ".", "healpix", ".", "write_partial_map", "(", 
"outfile", ",", "data", ",", "nside_pixel", ")" ]
Take a pre-existing maglim map and divide it into chunks consistent with the catalog pixels.
[ "Take", "a", "pre", "-", "existing", "maglim", "map", "and", "divide", "it", "into", "chunks", "consistent", "with", "the", "catalog", "pixels", "." ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/preprocess/maglims.py#L266-L332
train
DarkEnergySurvey/ugali
ugali/preprocess/maglims.py
Maglims.run
def run(self,field=None,simple=False,force=False): """ Loop through pixels containing catalog objects and calculate the magnitude limit. This gets a bit convoluted due to all the different pixel resolutions... """ if field is None: fields = [1,2] else: fields = [field] for filenames in self.filenames.compress(~self.filenames.mask['catalog']).data: infile = filenames['catalog'] for f in fields: outfile = filenames['mask_%i'%f] if os.path.exists(outfile) and not force: logger.info("Found %s; skipping..."%outfile) continue pixels,maglims=self.calculate(infile,f,simple) logger.info("Creating %s"%outfile) outdir = mkdir(os.path.dirname(outfile)) data = odict() data['PIXEL']=pixels data['MAGLIM']=maglims.astype('f4') ugali.utils.healpix.write_partial_map(outfile,data, self.nside_pixel)
python
def run(self,field=None,simple=False,force=False): """ Loop through pixels containing catalog objects and calculate the magnitude limit. This gets a bit convoluted due to all the different pixel resolutions... """ if field is None: fields = [1,2] else: fields = [field] for filenames in self.filenames.compress(~self.filenames.mask['catalog']).data: infile = filenames['catalog'] for f in fields: outfile = filenames['mask_%i'%f] if os.path.exists(outfile) and not force: logger.info("Found %s; skipping..."%outfile) continue pixels,maglims=self.calculate(infile,f,simple) logger.info("Creating %s"%outfile) outdir = mkdir(os.path.dirname(outfile)) data = odict() data['PIXEL']=pixels data['MAGLIM']=maglims.astype('f4') ugali.utils.healpix.write_partial_map(outfile,data, self.nside_pixel)
[ "def", "run", "(", "self", ",", "field", "=", "None", ",", "simple", "=", "False", ",", "force", "=", "False", ")", ":", "if", "field", "is", "None", ":", "fields", "=", "[", "1", ",", "2", "]", "else", ":", "fields", "=", "[", "field", "]", "for", "filenames", "in", "self", ".", "filenames", ".", "compress", "(", "~", "self", ".", "filenames", ".", "mask", "[", "'catalog'", "]", ")", ".", "data", ":", "infile", "=", "filenames", "[", "'catalog'", "]", "for", "f", "in", "fields", ":", "outfile", "=", "filenames", "[", "'mask_%i'", "%", "f", "]", "if", "os", ".", "path", ".", "exists", "(", "outfile", ")", "and", "not", "force", ":", "logger", ".", "info", "(", "\"Found %s; skipping...\"", "%", "outfile", ")", "continue", "pixels", ",", "maglims", "=", "self", ".", "calculate", "(", "infile", ",", "f", ",", "simple", ")", "logger", ".", "info", "(", "\"Creating %s\"", "%", "outfile", ")", "outdir", "=", "mkdir", "(", "os", ".", "path", ".", "dirname", "(", "outfile", ")", ")", "data", "=", "odict", "(", ")", "data", "[", "'PIXEL'", "]", "=", "pixels", "data", "[", "'MAGLIM'", "]", "=", "maglims", ".", "astype", "(", "'f4'", ")", "ugali", ".", "utils", ".", "healpix", ".", "write_partial_map", "(", "outfile", ",", "data", ",", "self", ".", "nside_pixel", ")" ]
Loop through pixels containing catalog objects and calculate the magnitude limit. This gets a bit convoluted due to all the different pixel resolutions...
[ "Loop", "through", "pixels", "containing", "catalog", "objects", "and", "calculate", "the", "magnitude", "limit", ".", "This", "gets", "a", "bit", "convoluted", "due", "to", "all", "the", "different", "pixel", "resolutions", "..." ]
21e890b4117fc810afb6fb058e8055d564f03382
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/preprocess/maglims.py#L52-L75
train
consbio/ncdjango
ncdjango/interfaces/arcgis/views.py
ArcGISMapServerMixin.get_variable_set
def get_variable_set(self, variable_set, data): """Filters the given variable set based on request parameters""" if data.get('dynamic_layers'): variable_set = [] # TODO elif data.get('layers'): op, layer_ids = data['layers'].split(':', 1) op = op.lower() layer_ids = [int(x) for x in layer_ids.split(',')] if op in ('show', 'include'): variable_set = [x for x in variable_set if x.index in layer_ids] elif op in ('hide', 'exclude'): variable_set = [x for x in variable_set if x.index not in layer_ids] elif self.service.render_top_layer_only: variable_set = [variable_set[0]] return variable_set
python
def get_variable_set(self, variable_set, data): """Filters the given variable set based on request parameters""" if data.get('dynamic_layers'): variable_set = [] # TODO elif data.get('layers'): op, layer_ids = data['layers'].split(':', 1) op = op.lower() layer_ids = [int(x) for x in layer_ids.split(',')] if op in ('show', 'include'): variable_set = [x for x in variable_set if x.index in layer_ids] elif op in ('hide', 'exclude'): variable_set = [x for x in variable_set if x.index not in layer_ids] elif self.service.render_top_layer_only: variable_set = [variable_set[0]] return variable_set
[ "def", "get_variable_set", "(", "self", ",", "variable_set", ",", "data", ")", ":", "if", "data", ".", "get", "(", "'dynamic_layers'", ")", ":", "variable_set", "=", "[", "]", "# TODO", "elif", "data", ".", "get", "(", "'layers'", ")", ":", "op", ",", "layer_ids", "=", "data", "[", "'layers'", "]", ".", "split", "(", "':'", ",", "1", ")", "op", "=", "op", ".", "lower", "(", ")", "layer_ids", "=", "[", "int", "(", "x", ")", "for", "x", "in", "layer_ids", ".", "split", "(", "','", ")", "]", "if", "op", "in", "(", "'show'", ",", "'include'", ")", ":", "variable_set", "=", "[", "x", "for", "x", "in", "variable_set", "if", "x", ".", "index", "in", "layer_ids", "]", "elif", "op", "in", "(", "'hide'", ",", "'exclude'", ")", ":", "variable_set", "=", "[", "x", "for", "x", "in", "variable_set", "if", "x", ".", "index", "not", "in", "layer_ids", "]", "elif", "self", ".", "service", ".", "render_top_layer_only", ":", "variable_set", "=", "[", "variable_set", "[", "0", "]", "]", "return", "variable_set" ]
Filters the given variable set based on request parameters
[ "Filters", "the", "given", "variable", "set", "based", "on", "request", "parameters" ]
f807bfd1e4083ab29fbc3c4d4418be108383a710
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/interfaces/arcgis/views.py#L207-L224
train
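The layers value follows ArcGIS's operation:ids grammar. A sketch of the parsing step in isolation:

data = {'layers': 'show:0,2'}
op, ids = data['layers'].split(':', 1)
layer_ids = [int(x) for x in ids.split(',')]
# op == 'show', layer_ids == [0, 2] -> keep only variables with index 0 or 2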
consbio/ncdjango
ncdjango/interfaces/arcgis/views.py
ArcGISMapServerMixin.apply_time_to_configurations
def apply_time_to_configurations(self, configurations, data): """Applies the correct time index to configurations""" time_value = None if data.get('time'): time_value = data['time'] # Only single time values are supported. For extents, just grab the first value if isinstance(data['time'], (tuple, list)): time_value = time_value[0] if time_value: for config in configurations: config.set_time_index_from_datetime(time_value, best_fit=ALLOW_BEST_FIT_TIME_INDEX) return configurations
python
def apply_time_to_configurations(self, configurations, data): """Applies the correct time index to configurations""" time_value = None if data.get('time'): time_value = data['time'] # Only single time values are supported. For extents, just grab the first value if isinstance(data['time'], (tuple, list)): time_value = time_value[0] if time_value: for config in configurations: config.set_time_index_from_datetime(time_value, best_fit=ALLOW_BEST_FIT_TIME_INDEX) return configurations
[ "def", "apply_time_to_configurations", "(", "self", ",", "configurations", ",", "data", ")", ":", "time_value", "=", "None", "if", "data", ".", "get", "(", "'time'", ")", ":", "time_value", "=", "data", "[", "'time'", "]", "# Only single time values are supported. For extents, just grab the first value", "if", "isinstance", "(", "data", "[", "'time'", "]", ",", "(", "tuple", ",", "list", ")", ")", ":", "time_value", "=", "time_value", "[", "0", "]", "if", "time_value", ":", "for", "config", "in", "configurations", ":", "config", ".", "set_time_index_from_datetime", "(", "time_value", ",", "best_fit", "=", "ALLOW_BEST_FIT_TIME_INDEX", ")", "return", "configurations" ]
Applies the correct time index to configurations
[ "Applies", "the", "correct", "time", "index", "to", "configurations" ]
f807bfd1e4083ab29fbc3c4d4418be108383a710
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/interfaces/arcgis/views.py#L226-L241
train
consbio/ncdjango
ncdjango/interfaces/arcgis/views.py
GetImageView._get_form_defaults
def _get_form_defaults(self): """Returns default values for the get image form""" return { 'response_format': 'html', 'bbox': self.service.full_extent, 'size': '400,400', 'dpi': 200, 'image_projection': pyproj.Proj(str(self.service.projection)), 'bbox_projection': pyproj.Proj(str(self.service.projection)), 'image_format': 'png', 'transparent': True }
python
def _get_form_defaults(self): """Returns default values for the get image form""" return { 'response_format': 'html', 'bbox': self.service.full_extent, 'size': '400,400', 'dpi': 200, 'image_projection': pyproj.Proj(str(self.service.projection)), 'bbox_projection': pyproj.Proj(str(self.service.projection)), 'image_format': 'png', 'transparent': True }
[ "def", "_get_form_defaults", "(", "self", ")", ":", "return", "{", "'response_format'", ":", "'html'", ",", "'bbox'", ":", "self", ".", "service", ".", "full_extent", ",", "'size'", ":", "'400,400'", ",", "'dpi'", ":", "200", ",", "'image_projection'", ":", "pyproj", ".", "Proj", "(", "str", "(", "self", ".", "service", ".", "projection", ")", ")", ",", "'bbox_projection'", ":", "pyproj", ".", "Proj", "(", "str", "(", "self", ".", "service", ".", "projection", ")", ")", ",", "'image_format'", ":", "'png'", ",", "'transparent'", ":", "True", "}" ]
Returns default values for the get image form
[ "Returns", "default", "values", "for", "the", "get", "image", "form" ]
f807bfd1e4083ab29fbc3c4d4418be108383a710
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/interfaces/arcgis/views.py#L247-L259
train
consbio/ncdjango
ncdjango/interfaces/arcgis/views.py
GetImageView.get_render_configurations
def get_render_configurations(self, request, **kwargs): """Render image interface""" data = self.process_form_data(self._get_form_defaults(), kwargs) variable_set = self.get_variable_set(self.service.variable_set.order_by('index'), data) base_config = ImageConfiguration( extent=data['bbox'], size=data['size'], image_format=data['image_format'], background_color=TRANSPARENT_BACKGROUND_COLOR if data.get('transparent') else DEFAULT_BACKGROUND_COLOR ) return base_config, self.apply_time_to_configurations([RenderConfiguration(v) for v in variable_set], data)
python
def get_render_configurations(self, request, **kwargs): """Render image interface""" data = self.process_form_data(self._get_form_defaults(), kwargs) variable_set = self.get_variable_set(self.service.variable_set.order_by('index'), data) base_config = ImageConfiguration( extent=data['bbox'], size=data['size'], image_format=data['image_format'], background_color=TRANSPARENT_BACKGROUND_COLOR if data.get('transparent') else DEFAULT_BACKGROUND_COLOR ) return base_config, self.apply_time_to_configurations([RenderConfiguration(v) for v in variable_set], data)
[ "def", "get_render_configurations", "(", "self", ",", "request", ",", "*", "*", "kwargs", ")", ":", "data", "=", "self", ".", "process_form_data", "(", "self", ".", "_get_form_defaults", "(", ")", ",", "kwargs", ")", "variable_set", "=", "self", ".", "get_variable_set", "(", "self", ".", "service", ".", "variable_set", ".", "order_by", "(", "'index'", ")", ",", "data", ")", "base_config", "=", "ImageConfiguration", "(", "extent", "=", "data", "[", "'bbox'", "]", ",", "size", "=", "data", "[", "'size'", "]", ",", "image_format", "=", "data", "[", "'image_format'", "]", ",", "background_color", "=", "TRANSPARENT_BACKGROUND_COLOR", "if", "data", ".", "get", "(", "'transparent'", ")", "else", "DEFAULT_BACKGROUND_COLOR", ")", "return", "base_config", ",", "self", ".", "apply_time_to_configurations", "(", "[", "RenderConfiguration", "(", "v", ")", "for", "v", "in", "variable_set", "]", ",", "data", ")" ]
Render image interface
[ "Render", "image", "interface" ]
f807bfd1e4083ab29fbc3c4d4418be108383a710
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/interfaces/arcgis/views.py#L284-L297
train
consbio/ncdjango
ncdjango/interfaces/arcgis/views.py
IdentifyView._get_form_defaults
def _get_form_defaults(self): """Returns default values for the identify form""" return { 'response_format': 'html', 'geometry_type': 'esriGeometryPoint', 'projection': pyproj.Proj(str(self.service.projection)), 'return_geometry': True, 'maximum_allowable_offset': 2, 'geometry_precision': 3, 'return_z': False, 'return_m': False }
python
def _get_form_defaults(self): """Returns default values for the identify form""" return { 'response_format': 'html', 'geometry_type': 'esriGeometryPoint', 'projection': pyproj.Proj(str(self.service.projection)), 'return_geometry': True, 'maximum_allowable_offset': 2, 'geometry_precision': 3, 'return_z': False, 'return_m': False }
[ "def", "_get_form_defaults", "(", "self", ")", ":", "return", "{", "'response_format'", ":", "'html'", ",", "'geometry_type'", ":", "'esriGeometryPoint'", ",", "'projection'", ":", "pyproj", ".", "Proj", "(", "str", "(", "self", ".", "service", ".", "projection", ")", ")", ",", "'return_geometry'", ":", "True", ",", "'maximum_allowable_offset'", ":", "2", ",", "'geometry_precision'", ":", "3", ",", "'return_z'", ":", "False", ",", "'return_m'", ":", "False", "}" ]
Returns default values for the identify form
[ "Returns", "default", "values", "for", "the", "identify", "form" ]
f807bfd1e4083ab29fbc3c4d4418be108383a710
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/interfaces/arcgis/views.py#L303-L315
train
juju/theblues
theblues/terms.py
Terms.get_terms
def get_terms(self, name, revision=None): """ Retrieve a specific term and condition. @param name of the terms. @param revision of the terms, if none provided it will return the latest. @return The list of terms. @raise ServerError """ url = '{}terms/{}'.format(self.url, name) if revision: url = '{}?revision={}'.format(url, revision) json = make_request(url, timeout=self.timeout, client=self._client) try: # This is always a list of one element. data = json[0] return Term(name=data['name'], title=data.get('title'), revision=data['revision'], created_on=datetime.datetime.strptime( data['created-on'], "%Y-%m-%dT%H:%M:%SZ" ), content=data['content']) except (KeyError, TypeError, ValueError, IndexError) as err: log.info( 'cannot process terms: invalid JSON response: {!r}'.format( json)) raise ServerError( 'unable to get terms for {}: {}'.format(name, err))
python
def get_terms(self, name, revision=None): """ Retrieve a specific term and condition. @param name of the terms. @param revision of the terms, if none provided it will return the latest. @return The list of terms. @raise ServerError """ url = '{}terms/{}'.format(self.url, name) if revision: url = '{}?revision={}'.format(url, revision) json = make_request(url, timeout=self.timeout, client=self._client) try: # This is always a list of one element. data = json[0] return Term(name=data['name'], title=data.get('title'), revision=data['revision'], created_on=datetime.datetime.strptime( data['created-on'], "%Y-%m-%dT%H:%M:%SZ" ), content=data['content']) except (KeyError, TypeError, ValueError, IndexError) as err: log.info( 'cannot process terms: invalid JSON response: {!r}'.format( json)) raise ServerError( 'unable to get terms for {}: {}'.format(name, err))
[ "def", "get_terms", "(", "self", ",", "name", ",", "revision", "=", "None", ")", ":", "url", "=", "'{}terms/{}'", ".", "format", "(", "self", ".", "url", ",", "name", ")", "if", "revision", ":", "url", "=", "'{}?revision={}'", ".", "format", "(", "url", ",", "revision", ")", "json", "=", "make_request", "(", "url", ",", "timeout", "=", "self", ".", "timeout", ",", "client", "=", "self", ".", "_client", ")", "try", ":", "# This is always a list of one element.", "data", "=", "json", "[", "0", "]", "return", "Term", "(", "name", "=", "data", "[", "'name'", "]", ",", "title", "=", "data", ".", "get", "(", "'title'", ")", ",", "revision", "=", "data", "[", "'revision'", "]", ",", "created_on", "=", "datetime", ".", "datetime", ".", "strptime", "(", "data", "[", "'created-on'", "]", ",", "\"%Y-%m-%dT%H:%M:%SZ\"", ")", ",", "content", "=", "data", "[", "'content'", "]", ")", "except", "(", "KeyError", ",", "TypeError", ",", "ValueError", ",", "IndexError", ")", "as", "err", ":", "log", ".", "info", "(", "'cannot process terms: invalid JSON response: {!r}'", ".", "format", "(", "json", ")", ")", "raise", "ServerError", "(", "'unable to get terms for {}: {}'", ".", "format", "(", "name", ",", "err", ")", ")" ]
Retrieve a specific term and condition.

@param name of the terms.
@param revision of the terms, if none provided it will return the latest.
@return The list of terms.
@raise ServerError
[ "Retrieve", "a", "specific", "term", "and", "condition", "." ]
f4431f29e43d04fc32f38f4f86cea45cd4e6ae98
https://github.com/juju/theblues/blob/f4431f29e43d04fc32f38f4f86cea45cd4e6ae98/theblues/terms.py#L38-L67
train
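A hedged usage sketch for the method above. The Terms constructor arguments and the terms-service URL are assumptions for illustration, not confirmed by this record, so check the theblues documentation before copying them.

from theblues.terms import Terms

# Assumed constructor and endpoint; both are illustrative only.
client = Terms('https://api.jujucharms.com/terms/v1/')

# Without a revision, the latest terms document is returned.
term = client.get_terms('canonical/example-terms')
print(term.revision, term.created_on)

# Passing revision appends ?revision=N to the request URL.
older = client.get_terms('canonical/example-terms', revision=1)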
consbio/ncdjango
ncdjango/views.py
NetCdfDatasetMixin.open_dataset
def open_dataset(self, service):
    """Opens and returns the NetCDF dataset associated with a service, or returns a previously-opened dataset"""

    if not self.dataset:
        path = os.path.join(SERVICE_DATA_ROOT, service.data_path)
        self.dataset = netCDF4.Dataset(path, 'r')
    return self.dataset
python
def open_dataset(self, service):
    """Opens and returns the NetCDF dataset associated with a service, or returns a previously-opened dataset"""

    if not self.dataset:
        path = os.path.join(SERVICE_DATA_ROOT, service.data_path)
        self.dataset = netCDF4.Dataset(path, 'r')
    return self.dataset
[ "def", "open_dataset", "(", "self", ",", "service", ")", ":", "if", "not", "self", ".", "dataset", ":", "path", "=", "os", ".", "path", ".", "join", "(", "SERVICE_DATA_ROOT", ",", "service", ".", "data_path", ")", "self", ".", "dataset", "=", "netCDF4", ".", "Dataset", "(", "path", ",", "'r'", ")", "return", "self", ".", "dataset" ]
Opens and returns the NetCDF dataset associated with a service, or returns a previously-opened dataset
[ "Opens", "and", "returns", "the", "NetCDF", "dataset", "associated", "with", "a", "service", "or", "returns", "a", "previously", "-", "opened", "dataset" ]
f807bfd1e4083ab29fbc3c4d4418be108383a710
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/views.py#L80-L86
train
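The method above memoizes a read-only netCDF4 handle on the view instance. A minimal self-contained sketch of the same lazy-open pattern, assuming a hypothetical data root (the real SERVICE_DATA_ROOT comes from ncdjango's settings) and adding an explicit close so the handle is not leaked:

import os
import netCDF4

SERVICE_DATA_ROOT = '/var/ncdjango/services'  # hypothetical stand-in for the ncdjango setting

class DatasetHandle:
    """Lazy-open/reuse pattern mirroring NetCdfDatasetMixin.open_dataset."""

    def __init__(self):
        self.dataset = None

    def open_dataset(self, service):
        # Open once, then return the cached read-only handle.
        if not self.dataset:
            path = os.path.join(SERVICE_DATA_ROOT, service.data_path)
            self.dataset = netCDF4.Dataset(path, 'r')
        return self.dataset

    def close_dataset(self):
        # Explicit close keeps file handles from leaking between requests.
        if self.dataset is not None:
            self.dataset.close()
            self.dataset = None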
consbio/ncdjango
ncdjango/views.py
GetImageViewBase._normalize_bbox
def _normalize_bbox(self, bbox, size):
    """Returns this bbox normalized to match the ratio of the given size."""

    bbox_ratio = float(bbox.width) / float(bbox.height)
    size_ratio = float(size[0]) / float(size[1])

    if round(size_ratio, 4) == round(bbox_ratio, 4):
        return bbox
    else:
        if bbox.height * size_ratio >= bbox.width:
            diff = bbox.height*size_ratio - bbox.width
            return BBox((bbox.xmin - diff/2, bbox.ymin, bbox.xmax + diff/2, bbox.ymax), bbox.projection)
        else:
            diff = abs(bbox.width/size_ratio - bbox.height)
            return BBox((bbox.xmin, bbox.ymin - diff/2, bbox.xmax, bbox.ymax + diff/2), bbox.projection)
python
def _normalize_bbox(self, bbox, size):
    """Returns this bbox normalized to match the ratio of the given size."""

    bbox_ratio = float(bbox.width) / float(bbox.height)
    size_ratio = float(size[0]) / float(size[1])

    if round(size_ratio, 4) == round(bbox_ratio, 4):
        return bbox
    else:
        if bbox.height * size_ratio >= bbox.width:
            diff = bbox.height*size_ratio - bbox.width
            return BBox((bbox.xmin - diff/2, bbox.ymin, bbox.xmax + diff/2, bbox.ymax), bbox.projection)
        else:
            diff = abs(bbox.width/size_ratio - bbox.height)
            return BBox((bbox.xmin, bbox.ymin - diff/2, bbox.xmax, bbox.ymax + diff/2), bbox.projection)
[ "def", "_normalize_bbox", "(", "self", ",", "bbox", ",", "size", ")", ":", "bbox_ratio", "=", "float", "(", "bbox", ".", "width", ")", "/", "float", "(", "bbox", ".", "height", ")", "size_ratio", "=", "float", "(", "size", "[", "0", "]", ")", "/", "float", "(", "size", "[", "1", "]", ")", "if", "round", "(", "size_ratio", ",", "4", ")", "==", "round", "(", "bbox_ratio", ",", "4", ")", ":", "return", "bbox", "else", ":", "if", "bbox", ".", "height", "*", "size_ratio", ">=", "bbox", ".", "width", ":", "diff", "=", "bbox", ".", "height", "*", "size_ratio", "-", "bbox", ".", "width", "return", "BBox", "(", "(", "bbox", ".", "xmin", "-", "diff", "/", "2", ",", "bbox", ".", "ymin", ",", "bbox", ".", "xmax", "+", "diff", "/", "2", ",", "bbox", ".", "ymax", ")", ",", "bbox", ".", "projection", ")", "else", ":", "diff", "=", "abs", "(", "bbox", ".", "width", "/", "size_ratio", "-", "bbox", ".", "height", ")", "return", "BBox", "(", "(", "bbox", ".", "xmin", ",", "bbox", ".", "ymin", "-", "diff", "/", "2", ",", "bbox", ".", "xmax", ",", "bbox", ".", "ymax", "+", "diff", "/", "2", ")", ",", "bbox", ".", "projection", ")" ]
Returns this bbox normalized to match the ratio of the given size.
[ "Returns", "this", "bbox", "normalized", "to", "match", "the", "ratio", "of", "the", "given", "size", "." ]
f807bfd1e4083ab29fbc3c4d4418be108383a710
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/views.py#L148-L162
train
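The method pads either the width or the height so the bbox's aspect ratio matches the requested image size. A dependency-free sketch of the same arithmetic, using a plain (xmin, ymin, xmax, ymax) tuple in place of the trefoil BBox that ncdjango uses:

def normalize_bbox(bbox, size):
    """Pad bbox so its width:height ratio matches size[0]:size[1]."""
    xmin, ymin, xmax, ymax = bbox
    width, height = xmax - xmin, ymax - ymin
    bbox_ratio = width / height
    size_ratio = size[0] / size[1]

    if round(size_ratio, 4) == round(bbox_ratio, 4):
        return bbox
    if height * size_ratio >= width:
        # Too narrow for the target ratio: grow symmetrically in x.
        diff = height * size_ratio - width
        return (xmin - diff / 2, ymin, xmax + diff / 2, ymax)
    # Too short for the target ratio: grow symmetrically in y.
    diff = abs(width / size_ratio - height)
    return (xmin, ymin - diff / 2, xmax, ymax + diff / 2)

# A 2:1 bbox rendered into a square image gains height, not width:
print(normalize_bbox((0, 0, 200, 100), (400, 400)))  # (0, -50.0, 200, 150.0)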