Column schema (column: type, observed length range):

repository_name: string, length 7 to 55
func_path_in_repository: string, length 4 to 223
func_name: string, length 1 to 134
whole_func_string: string, length 75 to 104k
language: string, 1 class (single value)
func_code_string: string, length 75 to 104k
func_code_tokens: sequence, length 19 to 28.4k
func_documentation_string: string, length 1 to 46.9k
func_documentation_tokens: sequence, length 1 to 1.97k
split_name: string, 1 class (single value)
func_code_url: string, length 87 to 315
rsheftel/raccoon
raccoon/dataframe.py
DataFrame.delete_columns
def delete_columns(self, columns):
    """
    Delete columns from the DataFrame

    :param columns: list of columns to delete
    :return: nothing
    """
    columns = [columns] if not isinstance(columns, (list, blist)) else columns
    if not all([x in self._columns for x in columns]):
        raise ValueError('all columns must be in current columns')
    for column in columns:
        c = self._columns.index(column)
        del self._data[c]
        del self._columns[c]
    if not len(self._data):  # if all the columns have been deleted, remove index
        self.index = list()
python
[ "def", "delete_columns", "(", "self", ",", "columns", ")", ":", "columns", "=", "[", "columns", "]", "if", "not", "isinstance", "(", "columns", ",", "(", "list", ",", "blist", ")", ")", "else", "columns", "if", "not", "all", "(", "[", "x", "in", "self", ".", "_columns", "for", "x", "in", "columns", "]", ")", ":", "raise", "ValueError", "(", "'all columns must be in current columns'", ")", "for", "column", "in", "columns", ":", "c", "=", "self", ".", "_columns", ".", "index", "(", "column", ")", "del", "self", ".", "_data", "[", "c", "]", "del", "self", ".", "_columns", "[", "c", "]", "if", "not", "len", "(", "self", ".", "_data", ")", ":", "# if all the columns have been deleted, remove index", "self", ".", "index", "=", "list", "(", ")" ]
Delete columns from the DataFrame :param columns: list of columns to delete :return: nothing
[ "Delete", "columns", "from", "the", "DataFrame" ]
train
https://github.com/rsheftel/raccoon/blob/e5c4b5fb933b51f33aff11e8168c39790e9a7c75/raccoon/dataframe.py#L945-L960
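A minimal usage sketch for delete_columns. The DataFrame constructor is not part of this record; the data=/index= keywords below are assumed from the from_json record later in this dump:

```python
import raccoon as rc

# constructor keywords assumed from the from_json record further down
df = rc.DataFrame(data={'a': [1, 2, 3], 'b': [4, 5, 6]}, index=[10, 11, 12])

df.delete_columns(['b'])   # delete a list of columns
print(df.columns)          # ['a']

df.delete_columns('a')     # a bare name is wrapped into a list internally
print(df.index)            # [] -- deleting the last column clears the index
```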
rsheftel/raccoon
raccoon/dataframe.py
DataFrame.sort_index
def sort_index(self):
    """
    Sort the DataFrame by the index. The sort modifies the DataFrame inplace

    :return: nothing
    """
    sort = sorted_list_indexes(self._index)
    # sort index
    self._index = blist([self._index[x] for x in sort]) if self._blist else [self._index[x] for x in sort]
    # each column
    for c in range(len(self._data)):
        self._data[c] = blist([self._data[c][i] for i in sort]) if self._blist else [self._data[c][i] for i in sort]
python
[ "def", "sort_index", "(", "self", ")", ":", "sort", "=", "sorted_list_indexes", "(", "self", ".", "_index", ")", "# sort index", "self", ".", "_index", "=", "blist", "(", "[", "self", ".", "_index", "[", "x", "]", "for", "x", "in", "sort", "]", ")", "if", "self", ".", "_blist", "else", "[", "self", ".", "_index", "[", "x", "]", "for", "x", "in", "sort", "]", "# each column", "for", "c", "in", "range", "(", "len", "(", "self", ".", "_data", ")", ")", ":", "self", ".", "_data", "[", "c", "]", "=", "blist", "(", "[", "self", ".", "_data", "[", "c", "]", "[", "i", "]", "for", "i", "in", "sort", "]", ")", "if", "self", ".", "_blist", "else", "[", "self", ".", "_data", "[", "c", "]", "[", "i", "]", "for", "i", "in", "sort", "]" ]
Sort the DataFrame by the index. The sort modifies the DataFrame inplace :return: nothing
[ "Sort", "the", "DataFrame", "by", "the", "index", ".", "The", "sort", "modifies", "the", "DataFrame", "inplace" ]
train
https://github.com/rsheftel/raccoon/blob/e5c4b5fb933b51f33aff11e8168c39790e9a7c75/raccoon/dataframe.py#L962-L973
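A short sketch of sort_index; it sorts in place and returns nothing, so inspect the frame afterwards (constructor keywords assumed as above, and .data assumed to expose the column lists as in assert_frame_equal below):

```python
import raccoon as rc

df = rc.DataFrame(data={'a': [30, 10, 20]}, index=[3, 1, 2])
df.sort_index()    # in place: rows reordered so the index is ascending
print(df.index)    # [1, 2, 3]
print(df.data)     # [[10, 20, 30]] -- column data follows the index
```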
rsheftel/raccoon
raccoon/dataframe.py
DataFrame.sort_columns
def sort_columns(self, column, key=None, reverse=False):
    """
    Sort the DataFrame by one of the columns. The sort modifies the DataFrame inplace. The key and reverse
    parameters have the same meaning as for the built-in sort() function.

    :param column: column name to use for the sort
    :param key: if not None then a function of one argument that is used to extract a comparison key from each
        list element
    :param reverse: if True then the list elements are sorted as if each comparison were reversed.
    :return: nothing
    """
    if isinstance(column, (list, blist)):
        raise TypeError('Can only sort by a single column ')
    sort = sorted_list_indexes(self._data[self._columns.index(column)], key, reverse)
    # sort index
    self._index = blist([self._index[x] for x in sort]) if self._blist else [self._index[x] for x in sort]
    # each column
    for c in range(len(self._data)):
        self._data[c] = blist([self._data[c][i] for i in sort]) if self._blist else [self._data[c][i] for i in sort]
python
[ "def", "sort_columns", "(", "self", ",", "column", ",", "key", "=", "None", ",", "reverse", "=", "False", ")", ":", "if", "isinstance", "(", "column", ",", "(", "list", ",", "blist", ")", ")", ":", "raise", "TypeError", "(", "'Can only sort by a single column '", ")", "sort", "=", "sorted_list_indexes", "(", "self", ".", "_data", "[", "self", ".", "_columns", ".", "index", "(", "column", ")", "]", ",", "key", ",", "reverse", ")", "# sort index", "self", ".", "_index", "=", "blist", "(", "[", "self", ".", "_index", "[", "x", "]", "for", "x", "in", "sort", "]", ")", "if", "self", ".", "_blist", "else", "[", "self", ".", "_index", "[", "x", "]", "for", "x", "in", "sort", "]", "# each column", "for", "c", "in", "range", "(", "len", "(", "self", ".", "_data", ")", ")", ":", "self", ".", "_data", "[", "c", "]", "=", "blist", "(", "[", "self", ".", "_data", "[", "c", "]", "[", "i", "]", "for", "i", "in", "sort", "]", ")", "if", "self", ".", "_blist", "else", "[", "self", ".", "_data", "[", "c", "]", "[", "i", "]", "for", "i", "in", "sort", "]" ]
Sort the DataFrame by one of the columns. The sort modifies the DataFrame inplace. The key and reverse parameters have the same meaning as for the built-in sort() function. :param column: column name to use for the sort :param key: if not None then a function of one argument that is used to extract a comparison key from each list element :param reverse: if True then the list elements are sorted as if each comparison were reversed. :return: nothing
[ "Sort", "the", "DataFrame", "by", "one", "of", "the", "columns", ".", "The", "sort", "modifies", "the", "DataFrame", "inplace", ".", "The", "key", "and", "reverse", "parameters", "have", "the", "same", "meaning", "as", "for", "the", "built", "-", "in", "sort", "()", "function", "." ]
train
https://github.com/rsheftel/raccoon/blob/e5c4b5fb933b51f33aff11e8168c39790e9a7c75/raccoon/dataframe.py#L975-L993
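sort_columns in use; key and reverse behave like the built-in sort(), and passing a list of columns raises TypeError (a sketch under the same constructor assumption):

```python
import raccoon as rc

df = rc.DataFrame(data={'a': [3, 1, 2], 'b': ['x', 'Z', 'y']}, index=[0, 1, 2])

df.sort_columns('a')                 # ascending by column 'a'
df.sort_columns('a', reverse=True)   # descending
df.sort_columns('b', key=str.lower)  # case-insensitive, like list.sort(key=...)
# df.sort_columns(['a', 'b'])        # TypeError: Can only sort by a single column
```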
rsheftel/raccoon
raccoon/dataframe.py
DataFrame.validate_integrity
def validate_integrity(self):
    """
    Validate the integrity of the DataFrame. This checks that the indexes, column names and internal data are not
    corrupted. Will raise an error if there is a problem.

    :return: nothing
    """
    self._validate_columns(self._columns)
    self._validate_index(self._index)
    self._validate_data()
python
[ "def", "validate_integrity", "(", "self", ")", ":", "self", ".", "_validate_columns", "(", "self", ".", "_columns", ")", "self", ".", "_validate_index", "(", "self", ".", "_index", ")", "self", ".", "_validate_data", "(", ")" ]
Validate the integrity of the DataFrame. This checks that the indexes, column names and internal data are not corrupted. Will raise an error if there is a problem. :return: nothing
[ "Validate", "the", "integrity", "of", "the", "DataFrame", ".", "This", "checks", "that", "the", "indexes", "column", "names", "and", "internal", "data", "are", "not", "corrupted", ".", "Will", "raise", "an", "error", "if", "there", "is", "a", "problem", "." ]
train
https://github.com/rsheftel/raccoon/blob/e5c4b5fb933b51f33aff11e8168c39790e9a7c75/raccoon/dataframe.py#L1016-L1025
rsheftel/raccoon
raccoon/dataframe.py
DataFrame.append
def append(self, data_frame):
    """
    Append another DataFrame to this DataFrame. If the new data_frame has columns that are not in the current
    DataFrame then new columns will be created. All of the indexes in the data_frame must be different from the
    current indexes or will raise an error.

    :param data_frame: DataFrame to append
    :return: nothing
    """
    if len(data_frame) == 0:  # empty DataFrame, do nothing
        return
    data_frame_index = data_frame.index
    combined_index = self._index + data_frame_index
    if len(set(combined_index)) != len(combined_index):
        raise ValueError('duplicate indexes in DataFrames')

    for c, column in enumerate(data_frame.columns):
        if PYTHON3:
            self.set(indexes=data_frame_index, columns=column, values=data_frame.data[c].copy())
        else:
            self.set(indexes=data_frame_index, columns=column, values=data_frame.data[c][:])
python
[ "def", "append", "(", "self", ",", "data_frame", ")", ":", "if", "len", "(", "data_frame", ")", "==", "0", ":", "# empty DataFrame, do nothing", "return", "data_frame_index", "=", "data_frame", ".", "index", "combined_index", "=", "self", ".", "_index", "+", "data_frame_index", "if", "len", "(", "set", "(", "combined_index", ")", ")", "!=", "len", "(", "combined_index", ")", ":", "raise", "ValueError", "(", "'duplicate indexes in DataFrames'", ")", "for", "c", ",", "column", "in", "enumerate", "(", "data_frame", ".", "columns", ")", ":", "if", "PYTHON3", ":", "self", ".", "set", "(", "indexes", "=", "data_frame_index", ",", "columns", "=", "column", ",", "values", "=", "data_frame", ".", "data", "[", "c", "]", ".", "copy", "(", ")", ")", "else", ":", "self", ".", "set", "(", "indexes", "=", "data_frame_index", ",", "columns", "=", "column", ",", "values", "=", "data_frame", ".", "data", "[", "c", "]", "[", ":", "]", ")" ]
Append another DataFrame to this DataFrame. If the new data_frame has columns that are not in the current DataFrame then new columns will be created. All of the indexes in the data_frame must be different from the current indexes or will raise an error. :param data_frame: DataFrame to append :return: nothing
[ "Append", "another", "DataFrame", "to", "this", "DataFrame", ".", "If", "the", "new", "data_frame", "has", "columns", "that", "are", "not", "in", "the", "current", "DataFrame", "then", "new", "columns", "will", "be", "created", ".", "All", "of", "the", "indexes", "in", "the", "data_frame", "must", "be", "different", "from", "the", "current", "indexes", "or", "will", "raise", "an", "error", "." ]
train
https://github.com/rsheftel/raccoon/blob/e5c4b5fb933b51f33aff11e8168c39790e9a7c75/raccoon/dataframe.py#L1027-L1047
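A sketch of append showing both behaviors the docstring describes: new columns are created, and overlapping indexes raise. How missing cells are filled is delegated to set(), which is not shown in this record:

```python
import raccoon as rc

df1 = rc.DataFrame(data={'a': [1, 2]}, index=[0, 1])
df2 = rc.DataFrame(data={'a': [3], 'b': [9]}, index=[2])

df1.append(df2)    # column 'b' is created via set(); rows 0-1 presumably get a fill value
# df1.append(df1)  # ValueError: duplicate indexes in DataFrames
```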
rsheftel/raccoon
raccoon/dataframe.py
DataFrame.add
def add(self, left_column, right_column, indexes=None):
    """
    Math helper method that adds element-wise two columns. If indexes are not None then will only perform the math
    on that sub-set of the columns.

    :param left_column: first column name
    :param right_column: second column name
    :param indexes: list of index values or list of booleans. If a list of booleans then the list must be the same
        length as the DataFrame
    :return: list
    """
    left_list, right_list = self._get_lists(left_column, right_column, indexes)
    return [l + r for l, r in zip(left_list, right_list)]
python
[ "def", "add", "(", "self", ",", "left_column", ",", "right_column", ",", "indexes", "=", "None", ")", ":", "left_list", ",", "right_list", "=", "self", ".", "_get_lists", "(", "left_column", ",", "right_column", ",", "indexes", ")", "return", "[", "l", "+", "r", "for", "l", ",", "r", "in", "zip", "(", "left_list", ",", "right_list", ")", "]" ]
Math helper method that adds element-wise two columns. If indexes are not None then will only perform the math on that sub-set of the columns. :param left_column: first column name :param right_column: second column name :param indexes: list of index values or list of booleans. If a list of booleans then the list must be the same\ length as the DataFrame :return: list
[ "Math", "helper", "method", "that", "adds", "element", "-", "wise", "two", "columns", ".", "If", "indexes", "are", "not", "None", "then", "will", "only", "perform", "the", "math", "on", "that", "sub", "-", "set", "of", "the", "columns", "." ]
train
https://github.com/rsheftel/raccoon/blob/e5c4b5fb933b51f33aff11e8168c39790e9a7c75/raccoon/dataframe.py#L1070-L1082
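add in use; the indexes parameter is handed to the _get_lists helper (not shown in this record), so the subset forms below rest on the docstring's description of index values and boolean masks:

```python
import raccoon as rc

df = rc.DataFrame(data={'x': [1, 2, 3], 'y': [10, 20, 30]}, index=[0, 1, 2])

print(df.add('x', 'y'))                               # [11, 22, 33]
print(df.add('x', 'y', indexes=[0, 2]))               # subset by index value: [11, 33]
print(df.add('x', 'y', indexes=[True, False, True]))  # boolean mask form: [11, 33]
```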
rsheftel/raccoon
raccoon/dataframe.py
DataFrame.isin
def isin(self, column, compare_list):
    """
    Returns a boolean list where each element indicates whether that element in the column is in the compare_list.

    :param column: single column name, does not work for multiple columns
    :param compare_list: list of items to compare to
    :return: list of booleans
    """
    return [x in compare_list for x in self._data[self._columns.index(column)]]
python
[ "def", "isin", "(", "self", ",", "column", ",", "compare_list", ")", ":", "return", "[", "x", "in", "compare_list", "for", "x", "in", "self", ".", "_data", "[", "self", ".", "_columns", ".", "index", "(", "column", ")", "]", "]" ]
Returns a boolean list where each element indicates whether that element in the column is in the compare_list. :param column: single column name, does not work for multiple columns :param compare_list: list of items to compare to :return: list of booleans
[ "Returns", "a", "boolean", "list", "where", "each", "elements", "is", "whether", "that", "element", "in", "the", "column", "is", "in", "the", "compare_list", "." ]
train
https://github.com/rsheftel/raccoon/blob/e5c4b5fb933b51f33aff11e8168c39790e9a7c75/raccoon/dataframe.py#L1126-L1134
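isin returns a plain boolean list, which makes it natural to use as a mask (a sketch; the constructor keywords remain an assumption):

```python
import raccoon as rc

df = rc.DataFrame(data={'a': [1, 2, 3, 4]}, index=[0, 1, 2, 3])
mask = df.isin('a', [2, 4, 99])
print(mask)   # [False, True, False, True]
```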
rsheftel/raccoon
raccoon/dataframe.py
DataFrame.iterrows
def iterrows(self, index=True):
    """
    Iterates over DataFrame rows as dictionary of the values. The index will be included.

    :param index: if True include the index in the results
    :return: dictionary
    """
    for i in range(len(self._index)):
        row = {self._index_name: self._index[i]} if index else dict()
        for c, col in enumerate(self._columns):
            row[col] = self._data[c][i]
        yield row
python
[ "def", "iterrows", "(", "self", ",", "index", "=", "True", ")", ":", "for", "i", "in", "range", "(", "len", "(", "self", ".", "_index", ")", ")", ":", "row", "=", "{", "self", ".", "_index_name", ":", "self", ".", "_index", "[", "i", "]", "}", "if", "index", "else", "dict", "(", ")", "for", "c", ",", "col", "in", "enumerate", "(", "self", ".", "_columns", ")", ":", "row", "[", "col", "]", "=", "self", ".", "_data", "[", "c", "]", "[", "i", "]", "yield", "row" ]
Iterates over DataFrame rows as dictionary of the values. The index will be included. :param index: if True include the index in the results :return: dictionary
[ "Iterates", "over", "DataFrame", "rows", "as", "dictionary", "of", "the", "values", ".", "The", "index", "will", "be", "included", "." ]
train
https://github.com/rsheftel/raccoon/blob/e5c4b5fb933b51f33aff11e8168c39790e9a7c75/raccoon/dataframe.py#L1136-L1147
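iterrows in use; each row is a fresh dict keyed by the index name (presumably 'index' by default) plus the column names:

```python
import raccoon as rc

df = rc.DataFrame(data={'a': [1, 2]}, index=['r1', 'r2'])

for row in df.iterrows():
    print(row)   # e.g. {'index': 'r1', 'a': 1}

for row in df.iterrows(index=False):
    print(row)   # {'a': 1} -- index key omitted
```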
rsheftel/raccoon
raccoon/dataframe.py
DataFrame.itertuples
def itertuples(self, index=True, name='Raccoon'):
    """
    Iterates over DataFrame rows as tuple of the values.

    :param index: if True then include the index
    :param name: name of the namedtuple
    :return: namedtuple
    """
    fields = [self._index_name] if index else list()
    fields.extend(self._columns)
    row_tuple = namedtuple(name, fields)
    for i in range(len(self._index)):
        row = {self._index_name: self._index[i]} if index else dict()
        for c, col in enumerate(self._columns):
            row[col] = self._data[c][i]
        yield row_tuple(**row)
python
[ "def", "itertuples", "(", "self", ",", "index", "=", "True", ",", "name", "=", "'Raccoon'", ")", ":", "fields", "=", "[", "self", ".", "_index_name", "]", "if", "index", "else", "list", "(", ")", "fields", ".", "extend", "(", "self", ".", "_columns", ")", "row_tuple", "=", "namedtuple", "(", "name", ",", "fields", ")", "for", "i", "in", "range", "(", "len", "(", "self", ".", "_index", ")", ")", ":", "row", "=", "{", "self", ".", "_index_name", ":", "self", ".", "_index", "[", "i", "]", "}", "if", "index", "else", "dict", "(", ")", "for", "c", ",", "col", "in", "enumerate", "(", "self", ".", "_columns", ")", ":", "row", "[", "col", "]", "=", "self", ".", "_data", "[", "c", "]", "[", "i", "]", "yield", "row_tuple", "(", "*", "*", "row", ")" ]
Iterates over DataFrame rows as tuple of the values. :param index: if True then include the index :param name: name of the namedtuple :return: namedtuple
[ "Iterates", "over", "DataFrame", "rows", "as", "tuple", "of", "the", "values", "." ]
train
https://github.com/rsheftel/raccoon/blob/e5c4b5fb933b51f33aff11e8168c39790e9a7c75/raccoon/dataframe.py#L1149-L1164
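itertuples is the namedtuple twin of iterrows; note that because fields come from namedtuple(), the index name and column names must be valid Python identifiers:

```python
import raccoon as rc

df = rc.DataFrame(data={'a': [1, 2]}, index=['r1', 'r2'])

for row in df.itertuples(name='Row'):
    print(row.index, row.a)   # fields: index name first, then each column
```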
rsheftel/raccoon
raccoon/dataframe.py
DataFrame.reset_index
def reset_index(self, drop=False):
    """
    Resets the index of the DataFrame to simple integer list and the index name to 'index'. If drop is True then
    the existing index is dropped, if drop is False then the current index is made a column in the DataFrame with
    the index name the name of the column. If the index is a tuple multi-index then each element of the tuple is
    converted into a separate column. If the index name was 'index' then the column name will be 'index_0' to not
    conflict on print().

    :param drop: if True then the current index is dropped, if False then index converted to columns
    :return: nothing
    """
    if not drop:
        if isinstance(self.index_name, tuple):
            index_data = list(map(list, zip(*self._index)))
            for i in range(len(self.index_name)):
                self.set_column(column=self.index_name[i], values=index_data[i])
        else:
            # use != rather than 'is not' for the string comparison; 'is not' tests
            # identity and only works here by accident of string interning
            col_name = self.index_name if self.index_name != 'index' else 'index_0'
            self.set_column(column=col_name, values=self._index)
    self.index = list(range(self.__len__()))
    self.index_name = 'index'
python
[ "def", "reset_index", "(", "self", ",", "drop", "=", "False", ")", ":", "if", "not", "drop", ":", "if", "isinstance", "(", "self", ".", "index_name", ",", "tuple", ")", ":", "index_data", "=", "list", "(", "map", "(", "list", ",", "zip", "(", "*", "self", ".", "_index", ")", ")", ")", "for", "i", "in", "range", "(", "len", "(", "self", ".", "index_name", ")", ")", ":", "self", ".", "set_column", "(", "column", "=", "self", ".", "index_name", "[", "i", "]", ",", "values", "=", "index_data", "[", "i", "]", ")", "else", ":", "col_name", "=", "self", ".", "index_name", "if", "self", ".", "index_name", "is", "not", "'index'", "else", "'index_0'", "self", ".", "set_column", "(", "column", "=", "col_name", ",", "values", "=", "self", ".", "_index", ")", "self", ".", "index", "=", "list", "(", "range", "(", "self", ".", "__len__", "(", ")", ")", ")", "self", ".", "index_name", "=", "'index'" ]
Resets the index of the DataFrame to simple integer list and the index name to 'index'. If drop is True then the existing index is dropped, if drop is False then the current index is made a column in the DataFrame with the index name the name of the column. If the index is a tuple multi-index then each element of the tuple is converted into a separate column. If the index name was 'index' then the column name will be 'index_0' to not conflict on print(). :param drop: if True then the current index is dropped, if False then index converted to columns :return: nothing
[ "Resets", "the", "index", "of", "the", "DataFrame", "to", "simple", "integer", "list", "and", "the", "index", "name", "to", "index", ".", "If", "drop", "is", "True", "then", "the", "existing", "index", "is", "dropped", "if", "drop", "is", "False", "then", "the", "current", "index", "is", "made", "a", "column", "in", "the", "DataFrame", "with", "the", "index", "name", "the", "name", "of", "the", "column", ".", "If", "the", "index", "is", "a", "tuple", "multi", "-", "index", "then", "each", "element", "of", "the", "tuple", "is", "converted", "into", "a", "separate", "column", ".", "If", "the", "index", "name", "was", "index", "then", "the", "column", "name", "will", "be", "index_0", "to", "not", "conflict", "on", "print", "()", "." ]
train
https://github.com/rsheftel/raccoon/blob/e5c4b5fb933b51f33aff11e8168c39790e9a7c75/raccoon/dataframe.py#L1166-L1186
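reset_index in use; the index_name constructor keyword is assumed (from_json below forwards it via meta_data):

```python
import raccoon as rc

df = rc.DataFrame(data={'a': [1, 2]}, index=['x', 'y'], index_name='letters')
df.reset_index()      # 'letters' becomes a regular column
print(df.columns)     # ['a', 'letters']
print(df.index)       # [0, 1], and df.index_name is back to 'index'

df2 = rc.DataFrame(data={'a': [1, 2]}, index=['x', 'y'])
df2.reset_index(drop=True)   # old index discarded, nothing added as a column
```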
rsheftel/raccoon
raccoon/dataframe.py
DataFrame.from_json
def from_json(cls, json_string):
    """
    Creates and returns a DataFrame from a JSON of the type created by to_json

    :param json_string: JSON
    :return: DataFrame
    """
    input_dict = json.loads(json_string)
    # convert index to tuple if required
    if input_dict['index'] and isinstance(input_dict['index'][0], list):
        input_dict['index'] = [tuple(x) for x in input_dict['index']]
    # convert index_name to tuple if required
    if isinstance(input_dict['meta_data']['index_name'], list):
        input_dict['meta_data']['index_name'] = tuple(input_dict['meta_data']['index_name'])
    data = input_dict['data'] if input_dict['data'] else None
    return cls(data=data, index=input_dict['index'], **input_dict['meta_data'])
python
[ "def", "from_json", "(", "cls", ",", "json_string", ")", ":", "input_dict", "=", "json", ".", "loads", "(", "json_string", ")", "# convert index to tuple if required", "if", "input_dict", "[", "'index'", "]", "and", "isinstance", "(", "input_dict", "[", "'index'", "]", "[", "0", "]", ",", "list", ")", ":", "input_dict", "[", "'index'", "]", "=", "[", "tuple", "(", "x", ")", "for", "x", "in", "input_dict", "[", "'index'", "]", "]", "# convert index_name to tuple if required", "if", "isinstance", "(", "input_dict", "[", "'meta_data'", "]", "[", "'index_name'", "]", ",", "list", ")", ":", "input_dict", "[", "'meta_data'", "]", "[", "'index_name'", "]", "=", "tuple", "(", "input_dict", "[", "'meta_data'", "]", "[", "'index_name'", "]", ")", "data", "=", "input_dict", "[", "'data'", "]", "if", "input_dict", "[", "'data'", "]", "else", "None", "return", "cls", "(", "data", "=", "data", ",", "index", "=", "input_dict", "[", "'index'", "]", ",", "*", "*", "input_dict", "[", "'meta_data'", "]", ")" ]
Creates and returns a DataFrame from a JSON of the type created by to_json :param json_string: JSON :return: DataFrame
[ "Creates", "and", "return", "a", "DataFrame", "from", "a", "JSON", "of", "the", "type", "created", "by", "to_json" ]
train
https://github.com/rsheftel/raccoon/blob/e5c4b5fb933b51f33aff11e8168c39790e9a7c75/raccoon/dataframe.py#L1190-L1205
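A round-trip sketch for from_json; to_json is referenced by the docstring but not shown here, so its exact output format is assumed to match what from_json expects:

```python
import raccoon as rc
from raccoon.utils import assert_frame_equal  # helper shown later in this dump

df = rc.DataFrame(data={'a': [1, 2]}, index=[0, 1])
restored = rc.DataFrame.from_json(df.to_json())
assert_frame_equal(df, restored)   # passes if the round trip preserved everything
```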
vmlaker/mpipe
src/FilterWorker.py
FilterWorker.doTask
def doTask(self, task):
    """Filter input *task* to pipelines -- make sure each one has
    no more than *max_tasks* tasks in it. Return a tuple
    (*task*, *results*) where *task* is the given task, and *results*
    is a list of latest retrieved results from pipelines."""

    # If we're not caching, then clear the table of last results.
    if not self._cache_results:
        self._last_results = dict()

    # Iterate the list of pipelines, draining each one of any results.
    # For pipelines whose current stream has less than *max_tasks* tasks
    # remaining, feed them the current task.
    for pipe in self._pipelines:
        count = self._task_counts[pipe]

        # Let's attempt to drain all (if any) results from the pipeline.
        valid = True
        last_result = None
        while count and valid:
            valid, result = pipe.get(sys.float_info.min)
            if valid:
                last_result = result
                count -= 1

        # Unless we're dropping results, save the last result (if any).
        if not self._drop_results:
            if last_result is not None:
                self._last_results[pipe] = last_result

        # If there is room for the task, or if it is a "stop" request,
        # put it on the pipeline.
        if count <= self._max_tasks-1 or task is None:
            pipe.put(task)
            count += 1

        # Update the task count for the pipeline.
        self._task_counts[pipe] = count

    # If we're only propagating the task, do so now.
    if self._drop_results:
        return task

    # Otherwise, also propagate the assembly of pipeline results.
    all_results = [res for res in self._last_results.values()]
    return task, all_results
python
[ "def", "doTask", "(", "self", ",", "task", ")", ":", "# If we're not caching, then clear the table of last results.", "if", "not", "self", ".", "_cache_results", ":", "self", ".", "_last_results", "=", "dict", "(", ")", "# Iterate the list of pipelines, draining each one of any results.", "# For pipelines whose current stream has less than *max_tasks* tasks ", "# remaining, feed them the current task.", "for", "pipe", "in", "self", ".", "_pipelines", ":", "count", "=", "self", ".", "_task_counts", "[", "pipe", "]", "# Let's attempt to drain all (if any) results from the pipeline.", "valid", "=", "True", "last_result", "=", "None", "while", "count", "and", "valid", ":", "valid", ",", "result", "=", "pipe", ".", "get", "(", "sys", ".", "float_info", ".", "min", ")", "if", "valid", ":", "last_result", "=", "result", "count", "-=", "1", "# Unless we're dropping results, save the last result (if any).", "if", "not", "self", ".", "_drop_results", ":", "if", "last_result", "is", "not", "None", ":", "self", ".", "_last_results", "[", "pipe", "]", "=", "last_result", "# If there is room for the task, or if it is a \"stop\" request,", "# put it on the pipeline.", "if", "count", "<=", "self", ".", "_max_tasks", "-", "1", "or", "task", "is", "None", ":", "pipe", ".", "put", "(", "task", ")", "count", "+=", "1", "# Update the task count for the pipeline.", "self", ".", "_task_counts", "[", "pipe", "]", "=", "count", "# If we're only propagating the task, do so now.", "if", "self", ".", "_drop_results", ":", "return", "task", "# Otherwise, also propagate the assembly of pipeline results.", "all_results", "=", "[", "res", "for", "res", "in", "self", ".", "_last_results", ".", "values", "(", ")", "]", "return", "task", ",", "all_results" ]
Filter input *task* to pipelines -- make sure each one has no more than *max_tasks* tasks in it. Return a tuple (*task*, *results*) where *task* is the given task, and *results* is a list of latest retrieved results from pipelines.
[ "Filter", "input", "*", "task", "*", "to", "pipelines", "--", "make", "sure", "each", "one", "has", "no", "more", "than", "*", "max_tasks", "*", "tasks", "in", "it", ".", "Return", "a", "tuple", "(", "*", "task", "*", "*", "results", "*", ")", "where", "*", "task", "*", "is", "the", "given", "task", "and", "*", "results", "*", "is", "a", "list", "of", "latest", "retrieved", "results", "from", "pipelines", "." ]
train
https://github.com/vmlaker/mpipe/blob/5a1804cf64271931f0cd3e4fff3e2b38291212dd/src/FilterWorker.py#L47-L94
rsheftel/raccoon
raccoon/utils.py
assert_frame_equal
def assert_frame_equal(left, right, data_function=None, data_args=None):
    """
    For unit testing equality of two DataFrames.

    :param left: first DataFrame
    :param right: second DataFrame
    :param data_function: if provided, this function will be used to compare the df.data
    :param data_args: arguments to pass to the data_function
    :return: nothing
    """
    if data_function:
        data_args = {} if not data_args else data_args
        data_function(left.data, right.data, **data_args)
    else:
        assert left.data == right.data
    assert left.index == right.index
    assert left.columns == right.columns
    assert left.index_name == right.index_name
    assert left.sort == right.sort
    assert left.blist == right.blist
python
[ "def", "assert_frame_equal", "(", "left", ",", "right", ",", "data_function", "=", "None", ",", "data_args", "=", "None", ")", ":", "if", "data_function", ":", "data_args", "=", "{", "}", "if", "not", "data_args", "else", "data_args", "data_function", "(", "left", ".", "data", ",", "right", ".", "data", ",", "*", "*", "data_args", ")", "else", ":", "assert", "left", ".", "data", "==", "right", ".", "data", "assert", "left", ".", "index", "==", "right", ".", "index", "assert", "left", ".", "columns", "==", "right", ".", "columns", "assert", "left", ".", "index_name", "==", "right", ".", "index_name", "assert", "left", ".", "sort", "==", "right", ".", "sort", "assert", "left", ".", "blist", "==", "right", ".", "blist" ]
For unit testing equality of two DataFrames. :param left: first DataFrame :param right: second DataFrame :param data_function: if provided, this function will be used to compare the df.data :param data_args: arguments to pass to the data_function :return: nothing
[ "For", "unit", "testing", "equality", "of", "two", "DataFrames", "." ]
train
https://github.com/rsheftel/raccoon/blob/e5c4b5fb933b51f33aff11e8168c39790e9a7c75/raccoon/utils.py#L8-L27
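assert_frame_equal with a custom comparator; approx below is a hypothetical helper showing how data_function/data_args plug in (the non-data fields are still compared strictly):

```python
import raccoon as rc
from raccoon.utils import assert_frame_equal

a = rc.DataFrame(data={'x': [1.0, 2.0]}, index=[0, 1])
b = rc.DataFrame(data={'x': [1.0, 2.0000001]}, index=[0, 1])

def approx(left, right, tol=1e-6):
    # hypothetical comparator: element-wise tolerance over the column lists
    for lcol, rcol in zip(left, right):
        for lv, rv in zip(lcol, rcol):
            assert abs(lv - rv) <= tol

assert_frame_equal(a, b, data_function=approx, data_args={'tol': 1e-3})
# assert_frame_equal(a, b) would raise: the default path is strict equality
```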
rsheftel/raccoon
raccoon/utils.py
assert_series_equal
def assert_series_equal(left, right, data_function=None, data_args=None):
    """
    For unit testing equality of two Series.

    :param left: first Series
    :param right: second Series
    :param data_function: if provided, this function will be used to compare the df.data
    :param data_args: arguments to pass to the data_function
    :return: nothing
    """
    assert type(left) == type(right)
    if data_function:
        data_args = {} if not data_args else data_args
        data_function(left.data, right.data, **data_args)
    else:
        assert left.data == right.data
    assert left.index == right.index
    assert left.data_name == right.data_name
    assert left.index_name == right.index_name
    assert left.sort == right.sort
    if isinstance(left, rc.ViewSeries):
        assert left.offset == right.offset
    if isinstance(left, rc.Series):
        assert left.blist == right.blist
python
[ "def", "assert_series_equal", "(", "left", ",", "right", ",", "data_function", "=", "None", ",", "data_args", "=", "None", ")", ":", "assert", "type", "(", "left", ")", "==", "type", "(", "right", ")", "if", "data_function", ":", "data_args", "=", "{", "}", "if", "not", "data_args", "else", "data_args", "data_function", "(", "left", ".", "data", ",", "right", ".", "data", ",", "*", "*", "data_args", ")", "else", ":", "assert", "left", ".", "data", "==", "right", ".", "data", "assert", "left", ".", "index", "==", "right", ".", "index", "assert", "left", ".", "data_name", "==", "right", ".", "data_name", "assert", "left", ".", "index_name", "==", "right", ".", "index_name", "assert", "left", ".", "sort", "==", "right", ".", "sort", "if", "isinstance", "(", "left", ",", "rc", ".", "ViewSeries", ")", ":", "assert", "left", ".", "offset", "==", "right", ".", "offset", "if", "isinstance", "(", "left", ",", "rc", ".", "Series", ")", ":", "assert", "left", ".", "blist", "==", "right", ".", "blist" ]
For unit testing equality of two Series. :param left: first Series :param right: second Series :param data_function: if provided, this function will be used to compare the df.data :param data_args: arguments to pass to the data_function :return: nothing
[ "For", "unit", "testing", "equality", "of", "two", "Series", "." ]
train
https://github.com/rsheftel/raccoon/blob/e5c4b5fb933b51f33aff11e8168c39790e9a7c75/raccoon/utils.py#L30-L53
vmlaker/mpipe
src/Pipeline.py
Pipeline.get
def get(self, timeout=None):
    """Return result from the pipeline."""
    result = None
    for stage in self._output_stages:
        result = stage.get(timeout)
    return result
python
[ "def", "get", "(", "self", ",", "timeout", "=", "None", ")", ":", "result", "=", "None", "for", "stage", "in", "self", ".", "_output_stages", ":", "result", "=", "stage", ".", "get", "(", "timeout", ")", "return", "result" ]
Return result from the pipeline.
[ "Return", "result", "from", "the", "pipeline", "." ]
train
https://github.com/vmlaker/mpipe/blob/5a1804cf64271931f0cd3e4fff3e2b38291212dd/src/Pipeline.py#L15-L20
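A sketch of driving Pipeline.get() end to end. The OrderedStage/Pipeline constructors come from mpipe's documentation, not from this record, so treat them as an assumption:

```python
from mpipe import OrderedStage, Pipeline

def increment(value):
    return value + 1

# one worker stage wrapped in a pipeline
pipe = Pipeline(OrderedStage(increment))

pipe.put(1)
print(pipe.get())   # 2 -- blocks on the output stage until a result arrives
pipe.put(None)      # None is the conventional stop task that shuts workers down
```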
brenns10/tswift
tswift.py
main
def main():
    """
    Run the CLI.
    """
    parser = argparse.ArgumentParser(
        description='Search artists, lyrics, and songs!'
    )
    parser.add_argument(
        'artist',
        help='Specify an artist name (Default: Taylor Swift)',
        default='Taylor Swift',
        nargs='?',
    )
    parser.add_argument(
        '-s', '--song',
        help='Given artist name, specify a song name',
        required=False,
    )
    parser.add_argument(
        '-l', '--lyrics',
        help='Search for song by lyrics',
        required=False,
    )
    args = parser.parse_args()

    if args.lyrics:
        song = Song.find_song(args.lyrics)
    else:
        if args.song:
            song = Song(
                title=args.song,
                artist=args.artist,
            )
        else:
            artist = Artist(args.artist)
            if artist.songs:
                song = random.choice(artist.songs)
            else:
                print('Couldn\'t find any songs by artist {}!'
                      .format(args.artist))
                sys.exit(1)
    print(song.format())
python
[ "def", "main", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Search artists, lyrics, and songs!'", ")", "parser", ".", "add_argument", "(", "'artist'", ",", "help", "=", "'Specify an artist name (Default: Taylor Swift)'", ",", "default", "=", "'Taylor Swift'", ",", "nargs", "=", "'?'", ",", ")", "parser", ".", "add_argument", "(", "'-s'", ",", "'--song'", ",", "help", "=", "'Given artist name, specify a song name'", ",", "required", "=", "False", ",", ")", "parser", ".", "add_argument", "(", "'-l'", ",", "'--lyrics'", ",", "help", "=", "'Search for song by lyrics'", ",", "required", "=", "False", ",", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "if", "args", ".", "lyrics", ":", "song", "=", "Song", ".", "find_song", "(", "args", ".", "lyrics", ")", "else", ":", "if", "args", ".", "song", ":", "song", "=", "Song", "(", "title", "=", "args", ".", "song", ",", "artist", "=", "args", ".", "artist", ",", ")", "else", ":", "artist", "=", "Artist", "(", "args", ".", "artist", ")", "if", "artist", ".", "songs", ":", "song", "=", "random", ".", "choice", "(", "artist", ".", "songs", ")", "else", ":", "print", "(", "'Couldn\\'t find any songs by artist {}!'", ".", "format", "(", "args", ".", "artist", ")", ")", "sys", ".", "exit", "(", "1", ")", "print", "(", "song", ".", "format", "(", ")", ")" ]
Run the CLI.
[ "Run", "the", "CLI", "." ]
train
https://github.com/brenns10/tswift/blob/f4a8f3127e088d6b3a9669496f107c2704f4d1a3/tswift.py#L156-L198
brenns10/tswift
tswift.py
Song.load
def load(self):
    """Load the lyrics from MetroLyrics."""
    page = requests.get(self._url)
    # Forces utf-8 to prevent character mangling
    page.encoding = 'utf-8'
    tree = html.fromstring(page.text)
    lyric_div = tree.get_element_by_id('lyrics-body-text')
    verses = [c.text_content() for c in lyric_div.find_class('verse')]
    self._lyrics = '\n\n'.join(verses)
    return self
python
[ "def", "load", "(", "self", ")", ":", "page", "=", "requests", ".", "get", "(", "self", ".", "_url", ")", "# Forces utf-8 to prevent character mangling", "page", ".", "encoding", "=", "'utf-8'", "tree", "=", "html", ".", "fromstring", "(", "page", ".", "text", ")", "lyric_div", "=", "tree", ".", "get_element_by_id", "(", "'lyrics-body-text'", ")", "verses", "=", "[", "c", ".", "text_content", "(", ")", "for", "c", "in", "lyric_div", ".", "find_class", "(", "'verse'", ")", "]", "self", ".", "_lyrics", "=", "'\\n\\n'", ".", "join", "(", "verses", ")", "return", "self" ]
Load the lyrics from MetroLyrics.
[ "Load", "the", "lyrics", "from", "MetroLyrics", "." ]
train
https://github.com/brenns10/tswift/blob/f4a8f3127e088d6b3a9669496f107c2704f4d1a3/tswift.py#L61-L71
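Song.load in use (it is chainable and returns self); the Song keywords and format() call are taken from the main() record above. Caveat: MetroLyrics has since shut down, so this scraper is unlikely to work against the live site today:

```python
from tswift import Song

song = Song(title='Mine', artist='Taylor Swift')
song.load()            # fetch and parse the lyrics page; returns self
print(song.format())   # format() is what the CLI main() above prints
```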
brenns10/tswift
tswift.py
Artist.load
def load(self, verbose=False):
    """
    Load the list of songs.

    Note that this only loads a list of songs that this artist was the
    main artist of. If they were only featured in the song, that song
    won't be listed here. There is a list on the artist page for that,
    I just haven't added any parsing code for that, since I don't need
    it.
    """
    self._songs = []
    page_num = 1
    total_pages = 1

    while page_num <= total_pages:
        if verbose:
            print('retrieving page %d' % page_num)
        page = requests.get(ARTIST_URL.format(artist=self.name,
                                              n=page_num))
        tree = html.fromstring(page.text)
        song_rows_xp = r'//*[@id="popular"]/div/table/tbody/tr'
        songlist_pagination_xp = r'//*[@id="main-content"]/div[1]/'\
                                 'div[2]/p/span/a'

        rows = tree.xpath(song_rows_xp)
        for row in rows:
            song_link = row.xpath(r'./td/a[contains(@class,"title")]')
            assert len(song_link) == 1
            self._songs.append(Song(url=song_link[0].attrib['href']))

        total_pages = len(tree.xpath(songlist_pagination_xp))
        page_num += 1

    return self
python
[ "def", "load", "(", "self", ",", "verbose", "=", "False", ")", ":", "self", ".", "_songs", "=", "[", "]", "page_num", "=", "1", "total_pages", "=", "1", "while", "page_num", "<=", "total_pages", ":", "if", "verbose", ":", "print", "(", "'retrieving page %d'", "%", "page_num", ")", "page", "=", "requests", ".", "get", "(", "ARTIST_URL", ".", "format", "(", "artist", "=", "self", ".", "name", ",", "n", "=", "page_num", ")", ")", "tree", "=", "html", ".", "fromstring", "(", "page", ".", "text", ")", "song_rows_xp", "=", "r'//*[@id=\"popular\"]/div/table/tbody/tr'", "songlist_pagination_xp", "=", "r'//*[@id=\"main-content\"]/div[1]/'", "'div[2]/p/span/a'", "rows", "=", "tree", ".", "xpath", "(", "song_rows_xp", ")", "for", "row", "in", "rows", ":", "song_link", "=", "row", ".", "xpath", "(", "r'./td/a[contains(@class,\"title\")]'", ")", "assert", "len", "(", "song_link", ")", "==", "1", "self", ".", "_songs", ".", "append", "(", "Song", "(", "url", "=", "song_link", "[", "0", "]", ".", "attrib", "[", "'href'", "]", ")", ")", "total_pages", "=", "len", "(", "tree", ".", "xpath", "(", "songlist_pagination_xp", ")", ")", "page_num", "+=", "1", "return", "self" ]
Load the list of songs. Note that this only loads a list of songs that this artist was the main artist of. If they were only featured in the song, that song won't be listed here. There is a list on the artist page for that, I just haven't added any parsing code for that, since I don't need it.
[ "Load", "the", "list", "of", "songs", "." ]
train
https://github.com/brenns10/tswift/blob/f4a8f3127e088d6b3a9669496f107c2704f4d1a3/tswift.py#L113-L144
jwass/geog
geog/geog.py
distance
def distance(p0, p1, deg=True, r=r_earth_mean):
    """
    Return the distance between two points on the surface of the Earth.

    Parameters
    ----------
    p0 : point-like (or array of point-like) [longitude, latitude] objects
    p1 : point-like (or array of point-like) [longitude, latitude] objects
    deg : bool, optional (default True)
        indicates if p0 and p1 are specified in degrees
    r : float, optional (default r_earth_mean)
        radius of the sphere

    Returns
    -------
    d : float

    Reference
    ---------
    http://www.movable-type.co.uk/scripts/latlong.html - Distance

    Note: Spherical earth model. By default uses radius of 6371.0 km.
    """
    single, (p0, p1) = _to_arrays((p0, 2), (p1, 2))
    if deg:
        p0 = np.radians(p0)
        p1 = np.radians(p1)

    lon0, lat0 = p0[:, 0], p0[:, 1]
    lon1, lat1 = p1[:, 0], p1[:, 1]

    # h_x used to denote haversine(x): sin^2(x / 2)
    h_dlat = sin((lat1 - lat0) / 2.0) ** 2
    h_dlon = sin((lon1 - lon0) / 2.0) ** 2
    h_angle = h_dlat + cos(lat0) * cos(lat1) * h_dlon
    angle = 2.0 * arcsin(sqrt(h_angle))

    d = r * angle

    if single:
        d = d[0]

    return d
python
[ "def", "distance", "(", "p0", ",", "p1", ",", "deg", "=", "True", ",", "r", "=", "r_earth_mean", ")", ":", "single", ",", "(", "p0", ",", "p1", ")", "=", "_to_arrays", "(", "(", "p0", ",", "2", ")", ",", "(", "p1", ",", "2", ")", ")", "if", "deg", ":", "p0", "=", "np", ".", "radians", "(", "p0", ")", "p1", "=", "np", ".", "radians", "(", "p1", ")", "lon0", ",", "lat0", "=", "p0", "[", ":", ",", "0", "]", ",", "p0", "[", ":", ",", "1", "]", "lon1", ",", "lat1", "=", "p1", "[", ":", ",", "0", "]", ",", "p1", "[", ":", ",", "1", "]", "# h_x used to denote haversine(x): sin^2(x / 2)", "h_dlat", "=", "sin", "(", "(", "lat1", "-", "lat0", ")", "/", "2.0", ")", "**", "2", "h_dlon", "=", "sin", "(", "(", "lon1", "-", "lon0", ")", "/", "2.0", ")", "**", "2", "h_angle", "=", "h_dlat", "+", "cos", "(", "lat0", ")", "*", "cos", "(", "lat1", ")", "*", "h_dlon", "angle", "=", "2.0", "*", "arcsin", "(", "sqrt", "(", "h_angle", ")", ")", "d", "=", "r", "*", "angle", "if", "single", ":", "d", "=", "d", "[", "0", "]", "return", "d" ]
Return the distance between two points on the surface of the Earth. Parameters ---------- p0 : point-like (or array of point-like) [longitude, latitude] objects p1 : point-like (or array of point-like) [longitude, latitude] objects deg : bool, optional (default True) indicates if p0 and p1 are specified in degrees r : float, optional (default r_earth_mean) radius of the sphere Returns ------- d : float Reference --------- http://www.movable-type.co.uk/scripts/latlong.html - Distance Note: Spherical earth model. By default uses radius of 6371.0 km.
[ "Return", "the", "distance", "between", "two", "points", "on", "the", "surface", "of", "the", "Earth", "." ]
train
https://github.com/jwass/geog/blob/52ceb9b543454b31c63694ee459aad9cd52f011a/geog/geog.py#L32-L74
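distance in use; points are [lon, lat] pairs and the result's units follow r (km by default, per the docstring note):

```python
import geog

boston = [-71.0589, 42.3601]
los_angeles = [-118.2437, 34.0522]

print(geog.distance(boston, los_angeles))             # single float, km
print(geog.distance([boston, los_angeles],
                    [los_angeles, boston]))           # array of two distances
print(geog.distance(boston, los_angeles, r=3958.8))   # mean radius in miles -> miles
```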
jwass/geog
geog/geog.py
course
def course(p0, p1, deg=True, bearing=False):
    """
    Compute the initial bearing along the great circle from p0 to p1

    NB: The angle returned by course() is not the traditional definition of
    bearing. It is defined such that 0 degrees is due East increasing
    counter-clockwise such that 90 degrees is due North. To obtain the
    bearing (0 degrees is due North increasing clockwise so that 90 degrees
    is due East), set the bearing flag input to True.

    Parameters
    ----------
    p0 : point-like (or array of point-like) [lon, lat] objects
    p1 : point-like (or array of point-like) [lon, lat] objects
    deg : bool, optional (default True)
        indicates if p0 and p1 are specified in degrees. The returned
        angle is returned in the same units as the input.
    bearing : bool, optional (default False)
        If True, use the classical definition of bearing where 0 degrees is
        due North increasing clockwise so that 90 degrees is due East.

    Reference
    ---------
    http://www.movable-type.co.uk/scripts/latlong.html - Bearing
    """
    single, (p0, p1) = _to_arrays((p0, 2), (p1, 2))
    if deg:
        p0 = np.radians(p0)
        p1 = np.radians(p1)

    lon0, lat0 = p0[:, 0], p0[:, 1]
    lon1, lat1 = p1[:, 0], p1[:, 1]

    dlon = lon1 - lon0
    a = sin(dlon) * cos(lat1)
    b = cos(lat0) * sin(lat1) - sin(lat0) * cos(lat1) * cos(dlon)

    if bearing:
        angle = arctan2(a, b)
    else:
        angle = arctan2(b, a)

    if deg:
        angle = np.degrees(angle)

    if single:
        angle = angle[0]

    return angle
python
[ "def", "course", "(", "p0", ",", "p1", ",", "deg", "=", "True", ",", "bearing", "=", "False", ")", ":", "single", ",", "(", "p0", ",", "p1", ")", "=", "_to_arrays", "(", "(", "p0", ",", "2", ")", ",", "(", "p1", ",", "2", ")", ")", "if", "deg", ":", "p0", "=", "np", ".", "radians", "(", "p0", ")", "p1", "=", "np", ".", "radians", "(", "p1", ")", "lon0", ",", "lat0", "=", "p0", "[", ":", ",", "0", "]", ",", "p0", "[", ":", ",", "1", "]", "lon1", ",", "lat1", "=", "p1", "[", ":", ",", "0", "]", ",", "p1", "[", ":", ",", "1", "]", "dlon", "=", "lon1", "-", "lon0", "a", "=", "sin", "(", "dlon", ")", "*", "cos", "(", "lat1", ")", "b", "=", "cos", "(", "lat0", ")", "*", "sin", "(", "lat1", ")", "-", "sin", "(", "lat0", ")", "*", "cos", "(", "lat1", ")", "*", "cos", "(", "dlon", ")", "if", "bearing", ":", "angle", "=", "arctan2", "(", "a", ",", "b", ")", "else", ":", "angle", "=", "arctan2", "(", "b", ",", "a", ")", "if", "deg", ":", "angle", "=", "np", ".", "degrees", "(", "angle", ")", "if", "single", ":", "angle", "=", "angle", "[", "0", "]", "return", "angle" ]
Compute the initial bearing along the great circle from p0 to p1 NB: The angle returned by course() is not the traditional definition of bearing. It is defined such that 0 degrees is due East increasing counter-clockwise such that 90 degrees is due North. To obtain the bearing (0 degrees is due North increasing clockwise so that 90 degrees is due East), set the bearing flag input to True. Parameters ---------- p0 : point-like (or array of point-like) [lon, lat] objects p1 : point-like (or array of point-like) [lon, lat] objects deg : bool, optional (default True) indicates if p0 and p1 are specified in degrees. The returned angle is returned in the same units as the input. bearing : bool, optional (default False) If True, use the classical definition of bearing where 0 degrees is due North increasing clockwise so that 90 degrees is due East. Reference --------- http://www.movable-type.co.uk/scripts/latlong.html - Bearing
[ "Compute", "the", "initial", "bearing", "along", "the", "great", "circle", "from", "p0", "to", "p1" ]
train
https://github.com/jwass/geog/blob/52ceb9b543454b31c63694ee459aad9cd52f011a/geog/geog.py#L77-L126
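A minimal usage sketch for course(), assuming the function above is importable as geog.course; the coordinates below are illustrative:

import geog

# London and Paris as [lon, lat] pairs, in degrees
london = [-0.1278, 51.5074]
paris = [2.3522, 48.8566]

# default convention: 0 degrees is due East, increasing counter-clockwise
angle = geog.course(london, paris)

# classical bearing: 0 degrees is due North, increasing clockwise
bearing = geog.course(london, paris, bearing=True)

print(angle, bearing)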
jwass/geog
geog/geog.py
propagate
def propagate(p0, angle, d, deg=True, bearing=False, r=r_earth_mean): """ Given an initial point and angle, move distance d along the surface Parameters ---------- p0 : point-like (or array of point-like) [lon, lat] objects angle : float (or array of float) bearing. Note that by default, 0 degrees is due East increasing counter-clockwise so that 90 degrees is due North. See the bearing flag to change the meaning of this angle d : float (or array of float) distance to move. The units of d should be consistent with input r deg : bool, optional (default True) Whether both p0 and angle are specified in degrees. The output points will also match the value of this flag. bearing : bool, optional (default False) Indicates whether to interpret the input angle as the classical definition of bearing. r : float, optional (default r_earth_mean) radius of the sphere Reference --------- http://www.movable-type.co.uk/scripts/latlong.html - Destination Note: Spherical earth model. By default uses radius of 6371.0 km. """ single, (p0, angle, d) = _to_arrays((p0, 2), (angle, 1), (d, 1)) if deg: p0 = np.radians(p0) angle = np.radians(angle) if not bearing: angle = np.pi / 2.0 - angle lon0, lat0 = p0[:,0], p0[:,1] angd = d / r lat1 = arcsin(sin(lat0) * cos(angd) + cos(lat0) * sin(angd) * cos(angle)) a = sin(angle) * sin(angd) * cos(lat0) b = cos(angd) - sin(lat0) * sin(lat1) lon1 = lon0 + arctan2(a, b) p1 = np.column_stack([lon1, lat1]) if deg: p1 = np.degrees(p1) if single: p1 = p1[0] return p1
python
def propagate(p0, angle, d, deg=True, bearing=False, r=r_earth_mean): """ Given an initial point and angle, move distance d along the surface Parameters ---------- p0 : point-like (or array of point-like) [lon, lat] objects angle : float (or array of float) bearing. Note that by default, 0 degrees is due East increasing counter-clockwise so that 90 degrees is due North. See the bearing flag to change the meaning of this angle d : float (or array of float) distance to move. The units of d should be consistent with input r deg : bool, optional (default True) Whether both p0 and angle are specified in degrees. The output points will also match the value of this flag. bearing : bool, optional (default False) Indicates whether to interpret the input angle as the classical definition of bearing. r : float, optional (default r_earth_mean) radius of the sphere Reference --------- http://www.movable-type.co.uk/scripts/latlong.html - Destination Note: Spherical earth model. By default uses radius of 6371.0 km. """ single, (p0, angle, d) = _to_arrays((p0, 2), (angle, 1), (d, 1)) if deg: p0 = np.radians(p0) angle = np.radians(angle) if not bearing: angle = np.pi / 2.0 - angle lon0, lat0 = p0[:,0], p0[:,1] angd = d / r lat1 = arcsin(sin(lat0) * cos(angd) + cos(lat0) * sin(angd) * cos(angle)) a = sin(angle) * sin(angd) * cos(lat0) b = cos(angd) - sin(lat0) * sin(lat1) lon1 = lon0 + arctan2(a, b) p1 = np.column_stack([lon1, lat1]) if deg: p1 = np.degrees(p1) if single: p1 = p1[0] return p1
[ "def", "propagate", "(", "p0", ",", "angle", ",", "d", ",", "deg", "=", "True", ",", "bearing", "=", "False", ",", "r", "=", "r_earth_mean", ")", ":", "single", ",", "(", "p0", ",", "angle", ",", "d", ")", "=", "_to_arrays", "(", "(", "p0", ",", "2", ")", ",", "(", "angle", ",", "1", ")", ",", "(", "d", ",", "1", ")", ")", "if", "deg", ":", "p0", "=", "np", ".", "radians", "(", "p0", ")", "angle", "=", "np", ".", "radians", "(", "angle", ")", "if", "not", "bearing", ":", "angle", "=", "np", ".", "pi", "/", "2.0", "-", "angle", "lon0", ",", "lat0", "=", "p0", "[", ":", ",", "0", "]", ",", "p0", "[", ":", ",", "1", "]", "angd", "=", "d", "/", "r", "lat1", "=", "arcsin", "(", "sin", "(", "lat0", ")", "*", "cos", "(", "angd", ")", "+", "cos", "(", "lat0", ")", "*", "sin", "(", "angd", ")", "*", "cos", "(", "angle", ")", ")", "a", "=", "sin", "(", "angle", ")", "*", "sin", "(", "angd", ")", "*", "cos", "(", "lat0", ")", "b", "=", "cos", "(", "angd", ")", "-", "sin", "(", "lat0", ")", "*", "sin", "(", "lat1", ")", "lon1", "=", "lon0", "+", "arctan2", "(", "a", ",", "b", ")", "p1", "=", "np", ".", "column_stack", "(", "[", "lon1", ",", "lat1", "]", ")", "if", "deg", ":", "p1", "=", "np", ".", "degrees", "(", "p1", ")", "if", "single", ":", "p1", "=", "p1", "[", "0", "]", "return", "p1" ]
Given an initial point and angle, move distance d along the surface Parameters ---------- p0 : point-like (or array of point-like) [lon, lat] objects angle : float (or array of float) bearing. Note that by default, 0 degrees is due East increasing counter-clockwise so that 90 degrees is due North. See the bearing flag to change the meaning of this angle d : float (or array of float) distance to move. The units of d should be consistent with input r deg : bool, optional (default True) Whether both p0 and angle are specified in degrees. The output points will also match the value of this flag. bearing : bool, optional (default False) Indicates whether to interpret the input angle as the classical definition of bearing. r : float, optional (default r_earth_mean) radius of the sphere Reference --------- http://www.movable-type.co.uk/scripts/latlong.html - Destination Note: Spherical earth model. By default uses radius of 6371.0 km.
[ "Given", "an", "initial", "point", "and", "angle", "move", "distance", "d", "along", "the", "surface" ]
train
https://github.com/jwass/geog/blob/52ceb9b543454b31c63694ee459aad9cd52f011a/geog/geog.py#L129-L184
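A hedged usage sketch for propagate(); it assumes the function is importable as geog.propagate and that, matching the docstring's default radius of 6371.0 km, distances are given in kilometres:

import geog

start = [-0.1278, 51.5074]  # [lon, lat] in degrees

# move 100 km due North: 90 degrees in the default math convention...
dest = geog.propagate(start, 90, 100)

# ...or 0 degrees when using the classical bearing convention
dest_bearing = geog.propagate(start, 0, 100, bearing=True)

print(dest, dest_bearing)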
lepture/flask-weixin
flask_weixin.py
Weixin.validate
def validate(self, signature, timestamp, nonce): """Validate request signature. :param signature: A string signature parameter sent by weixin. :param timestamp: An int timestamp parameter sent by weixin. :param nonce: An int nonce parameter sent by weixin. """ if not self.token: raise RuntimeError('WEIXIN_TOKEN is missing') if self.expires_in: try: timestamp = int(timestamp) except (ValueError, TypeError): # fake timestamp return False delta = time.time() - timestamp if delta < 0: # this is a fake timestamp return False if delta > self.expires_in: # expired timestamp return False values = [self.token, str(timestamp), str(nonce)] s = ''.join(sorted(values)) hsh = hashlib.sha1(s.encode('utf-8')).hexdigest() return signature == hsh
python
def validate(self, signature, timestamp, nonce): """Validate request signature. :param signature: A string signature parameter sent by weixin. :param timestamp: An int timestamp parameter sent by weixin. :param nonce: An int nonce parameter sent by weixin. """ if not self.token: raise RuntimeError('WEIXIN_TOKEN is missing') if self.expires_in: try: timestamp = int(timestamp) except (ValueError, TypeError): # fake timestamp return False delta = time.time() - timestamp if delta < 0: # this is a fake timestamp return False if delta > self.expires_in: # expired timestamp return False values = [self.token, str(timestamp), str(nonce)] s = ''.join(sorted(values)) hsh = hashlib.sha1(s.encode('utf-8')).hexdigest() return signature == hsh
[ "def", "validate", "(", "self", ",", "signature", ",", "timestamp", ",", "nonce", ")", ":", "if", "not", "self", ".", "token", ":", "raise", "RuntimeError", "(", "'WEIXIN_TOKEN is missing'", ")", "if", "self", ".", "expires_in", ":", "try", ":", "timestamp", "=", "int", "(", "timestamp", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "# fake timestamp", "return", "False", "delta", "=", "time", ".", "time", "(", ")", "-", "timestamp", "if", "delta", "<", "0", ":", "# this is a fake timestamp", "return", "False", "if", "delta", ">", "self", ".", "expires_in", ":", "# expired timestamp", "return", "False", "values", "=", "[", "self", ".", "token", ",", "str", "(", "timestamp", ")", ",", "str", "(", "nonce", ")", "]", "s", "=", "''", ".", "join", "(", "sorted", "(", "values", ")", ")", "hsh", "=", "hashlib", ".", "sha1", "(", "s", ".", "encode", "(", "'utf-8'", ")", ")", ".", "hexdigest", "(", ")", "return", "signature", "==", "hsh" ]
Validate request signature. :param signature: A string signature parameter sent by weixin. :param timestamp: An int timestamp parameter sent by weixin. :param nonce: An int nonce parameter sent by weixin.
[ "Validate", "request", "signature", "." ]
train
https://github.com/lepture/flask-weixin/blob/abf0b507d41b9780257507aa78fc0817fdc75719/flask_weixin.py#L77-L106
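The check mirrors how weixin builds the signature: SHA-1 over the sorted concatenation of token, timestamp and nonce. A small sketch of the signing side, with a hypothetical token value:

import hashlib
import time

token = 'my-weixin-token'  # hypothetical WEIXIN_TOKEN value
timestamp = str(int(time.time()))
nonce = '42'

# sort the three values, join them, and hash with SHA-1
signature = hashlib.sha1(
    ''.join(sorted([token, timestamp, nonce])).encode('utf-8')
).hexdigest()

# a Weixin instance configured with the same token would accept this:
# weixin.validate(signature, timestamp, nonce)  # -> True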
lepture/flask-weixin
flask_weixin.py
Weixin.parse
def parse(self, content): """Parse xml body sent by weixin. :param content: A text of xml body. """ raw = {} try: root = etree.fromstring(content) except SyntaxError as e: raise ValueError(*e.args) for child in root: raw[child.tag] = child.text formatted = self.format(raw) msg_type = formatted['type'] msg_parser = getattr(self, 'parse_%s' % msg_type, None) if callable(msg_parser): parsed = msg_parser(raw) else: parsed = self.parse_invalid_type(raw) formatted.update(parsed) return formatted
python
def parse(self, content): """Parse xml body sent by weixin. :param content: A text of xml body. """ raw = {} try: root = etree.fromstring(content) except SyntaxError as e: raise ValueError(*e.args) for child in root: raw[child.tag] = child.text formatted = self.format(raw) msg_type = formatted['type'] msg_parser = getattr(self, 'parse_%s' % msg_type, None) if callable(msg_parser): parsed = msg_parser(raw) else: parsed = self.parse_invalid_type(raw) formatted.update(parsed) return formatted
[ "def", "parse", "(", "self", ",", "content", ")", ":", "raw", "=", "{", "}", "try", ":", "root", "=", "etree", ".", "fromstring", "(", "content", ")", "except", "SyntaxError", "as", "e", ":", "raise", "ValueError", "(", "*", "e", ".", "args", ")", "for", "child", "in", "root", ":", "raw", "[", "child", ".", "tag", "]", "=", "child", ".", "text", "formatted", "=", "self", ".", "format", "(", "raw", ")", "msg_type", "=", "formatted", "[", "'type'", "]", "msg_parser", "=", "getattr", "(", "self", ",", "'parse_%s'", "%", "msg_type", ",", "None", ")", "if", "callable", "(", "msg_parser", ")", ":", "parsed", "=", "msg_parser", "(", "raw", ")", "else", ":", "parsed", "=", "self", ".", "parse_invalid_type", "(", "raw", ")", "formatted", ".", "update", "(", "parsed", ")", "return", "formatted" ]
Parse xml body sent by weixin. :param content: A text of xml body.
[ "Parse", "xml", "body", "sent", "by", "weixin", "." ]
train
https://github.com/lepture/flask-weixin/blob/abf0b507d41b9780257507aa78fc0817fdc75719/flask_weixin.py#L108-L133
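A sketch of the XML shape parse() consumes: every child tag of <xml> becomes a key in the raw dict before format() and the per-type parser normalize it. The instance name and the field values below are illustrative:

content = b"""<xml>
  <ToUserName><![CDATA[gh_account]]></ToUserName>
  <FromUserName><![CDATA[visitor]]></FromUserName>
  <CreateTime>1400000000</CreateTime>
  <MsgType><![CDATA[text]]></MsgType>
  <Content><![CDATA[help]]></Content>
  <MsgId>1234567890</MsgId>
</xml>"""

# assuming `weixin` is a configured Weixin instance
msg = weixin.parse(content)  # e.g. msg['type'] == 'text'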
lepture/flask-weixin
flask_weixin.py
Weixin.reply
def reply(self, username, type='text', sender=None, **kwargs): """Create the reply text for weixin. The reply varies per reply type. The acceptable types are `text`, `music`, `news`, `image`, `voice`, `video`. Each type accepts different parameters, but they share some common parameters: * username: the receiver's username * type: the reply type, aka text, music and news * sender: sender is optional if you have a default value Text reply requires an additional parameter of `content`. Music reply requires 4 more parameters: * title: A string for music title * description: A string for music description * music_url: A link of the music * hq_music_url: A link of the high quality music News reply requires an additional parameter of `articles`, which is a list/tuple of articles, each one is a dict: * title: A string for article title * description: A string for article description * picurl: A link for article cover image * url: A link for article url Image and Voice reply requires an additional parameter of `media_id`. Video reply requires 3 more parameters: * media_id: A string for video `media_id` * title: A string for video title * description: A string for video description """ sender = sender or self.sender if not sender: raise RuntimeError('WEIXIN_SENDER or sender argument is missing') if type == 'text': content = kwargs.get('content', '') return text_reply(username, sender, content) if type == 'music': values = {} for k in ('title', 'description', 'music_url', 'hq_music_url'): values[k] = kwargs.get(k) return music_reply(username, sender, **values) if type == 'news': items = kwargs.get('articles', []) return news_reply(username, sender, *items) if type == 'customer_service': service_account = kwargs.get('service_account', None) return transfer_customer_service_reply(username, sender, service_account) if type == 'image': media_id = kwargs.get('media_id', '') return image_reply(username, sender, media_id) if type == 'voice': media_id = kwargs.get('media_id', '') return voice_reply(username, sender, media_id) if type == 'video': values = {} for k in ('media_id', 'title', 'description'): values[k] = kwargs.get(k) return video_reply(username, sender, **values)
python
def reply(self, username, type='text', sender=None, **kwargs): """Create the reply text for weixin. The reply varies per reply type. The acceptable types are `text`, `music`, `news`, `image`, `voice`, `video`. Each type accepts different parameters, but they share some common parameters: * username: the receiver's username * type: the reply type, aka text, music and news * sender: sender is optional if you have a default value Text reply requires an additional parameter of `content`. Music reply requires 4 more parameters: * title: A string for music title * description: A string for music description * music_url: A link of the music * hq_music_url: A link of the high quality music News reply requires an additional parameter of `articles`, which is a list/tuple of articles, each one is a dict: * title: A string for article title * description: A string for article description * picurl: A link for article cover image * url: A link for article url Image and Voice reply requires an additional parameter of `media_id`. Video reply requires 3 more parameters: * media_id: A string for video `media_id` * title: A string for video title * description: A string for video description """ sender = sender or self.sender if not sender: raise RuntimeError('WEIXIN_SENDER or sender argument is missing') if type == 'text': content = kwargs.get('content', '') return text_reply(username, sender, content) if type == 'music': values = {} for k in ('title', 'description', 'music_url', 'hq_music_url'): values[k] = kwargs.get(k) return music_reply(username, sender, **values) if type == 'news': items = kwargs.get('articles', []) return news_reply(username, sender, *items) if type == 'customer_service': service_account = kwargs.get('service_account', None) return transfer_customer_service_reply(username, sender, service_account) if type == 'image': media_id = kwargs.get('media_id', '') return image_reply(username, sender, media_id) if type == 'voice': media_id = kwargs.get('media_id', '') return voice_reply(username, sender, media_id) if type == 'video': values = {} for k in ('media_id', 'title', 'description'): values[k] = kwargs.get(k) return video_reply(username, sender, **values)
[ "def", "reply", "(", "self", ",", "username", ",", "type", "=", "'text'", ",", "sender", "=", "None", ",", "*", "*", "kwargs", ")", ":", "sender", "=", "sender", "or", "self", ".", "sender", "if", "not", "sender", ":", "raise", "RuntimeError", "(", "'WEIXIN_SENDER or sender argument is missing'", ")", "if", "type", "==", "'text'", ":", "content", "=", "kwargs", ".", "get", "(", "'content'", ",", "''", ")", "return", "text_reply", "(", "username", ",", "sender", ",", "content", ")", "if", "type", "==", "'music'", ":", "values", "=", "{", "}", "for", "k", "in", "(", "'title'", ",", "'description'", ",", "'music_url'", ",", "'hq_music_url'", ")", ":", "values", "[", "k", "]", "=", "kwargs", ".", "get", "(", "k", ")", "return", "music_reply", "(", "username", ",", "sender", ",", "*", "*", "values", ")", "if", "type", "==", "'news'", ":", "items", "=", "kwargs", ".", "get", "(", "'articles'", ",", "[", "]", ")", "return", "news_reply", "(", "username", ",", "sender", ",", "*", "items", ")", "if", "type", "==", "'customer_service'", ":", "service_account", "=", "kwargs", ".", "get", "(", "'service_account'", ",", "None", ")", "return", "transfer_customer_service_reply", "(", "username", ",", "sender", ",", "service_account", ")", "if", "type", "==", "'image'", ":", "media_id", "=", "kwargs", ".", "get", "(", "'media_id'", ",", "''", ")", "return", "image_reply", "(", "username", ",", "sender", ",", "media_id", ")", "if", "type", "==", "'voice'", ":", "media_id", "=", "kwargs", ".", "get", "(", "'media_id'", ",", "''", ")", "return", "voice_reply", "(", "username", ",", "sender", ",", "media_id", ")", "if", "type", "==", "'video'", ":", "values", "=", "{", "}", "for", "k", "in", "(", "'media_id'", ",", "'title'", ",", "'description'", ")", ":", "values", "[", "k", "]", "=", "kwargs", ".", "get", "(", "k", ")", "return", "video_reply", "(", "username", ",", "sender", ",", "*", "*", "values", ")" ]
Create the reply text for weixin. The reply varies per reply type. The acceptable types are `text`, `music`, `news`, `image`, `voice`, `video`. Each type accepts different parameters, but they share some common parameters: * username: the receiver's username * type: the reply type, aka text, music and news * sender: sender is optional if you have a default value Text reply requires an additional parameter of `content`. Music reply requires 4 more parameters: * title: A string for music title * description: A string for music description * music_url: A link of the music * hq_music_url: A link of the high quality music News reply requires an additional parameter of `articles`, which is a list/tuple of articles, each one is a dict: * title: A string for article title * description: A string for article description * picurl: A link for article cover image * url: A link for article url Image and Voice reply requires an additional parameter of `media_id`. Video reply requires 3 more parameters: * media_id: A string for video `media_id` * title: A string for video title * description: A string for video description
[ "Create", "the", "reply", "text", "for", "weixin", "." ]
train
https://github.com/lepture/flask-weixin/blob/abf0b507d41b9780257507aa78fc0817fdc75719/flask_weixin.py#L187-L258
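A short sketch of building a text reply with the method above; the usernames are illustrative, and sender may be omitted when a default sender is configured:

xml = weixin.reply(
    username='visitor',    # the receiver, i.e. the original sender
    sender='gh_account',   # the official account
    type='text',
    content='hello from flask-weixin',
)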
lepture/flask-weixin
flask_weixin.py
Weixin.register
def register(self, key=None, func=None, **kwargs): """Register a command helper function. You can register the function:: def print_help(**kwargs): username = kwargs.get('sender') sender = kwargs.get('receiver') return weixin.reply( username, sender=sender, content='text reply' ) weixin.register('help', print_help) It is also accessible as a decorator:: @weixin.register('help') def print_help(*args, **kwargs): username = kwargs.get('sender') sender = kwargs.get('receiver') return weixin.reply( username, sender=sender, content='text reply' ) """ if func: if key is None: limitation = frozenset(kwargs.items()) self._registry_without_key.append((func, limitation)) else: self._registry[key] = func return func return self.__call__(key, **kwargs)
python
def register(self, key=None, func=None, **kwargs): """Register a command helper function. You can register the function:: def print_help(**kwargs): username = kwargs.get('sender') sender = kwargs.get('receiver') return weixin.reply( username, sender=sender, content='text reply' ) weixin.register('help', print_help) It is also accessible as a decorator:: @weixin.register('help') def print_help(*args, **kwargs): username = kwargs.get('sender') sender = kwargs.get('receiver') return weixin.reply( username, sender=sender, content='text reply' ) """ if func: if key is None: limitation = frozenset(kwargs.items()) self._registry_without_key.append((func, limitation)) else: self._registry[key] = func return func return self.__call__(key, **kwargs)
[ "def", "register", "(", "self", ",", "key", "=", "None", ",", "func", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "func", ":", "if", "key", "is", "None", ":", "limitation", "=", "frozenset", "(", "kwargs", ".", "items", "(", ")", ")", "self", ".", "_registry_without_key", ".", "append", "(", "(", "func", ",", "limitation", ")", ")", "else", ":", "self", ".", "_registry", "[", "key", "]", "=", "func", "return", "func", "return", "self", ".", "__call__", "(", "key", ",", "*", "*", "kwargs", ")" ]
Register a command helper function. You can register the function:: def print_help(**kwargs): username = kwargs.get('sender') sender = kwargs.get('receiver') return weixin.reply( username, sender=sender, content='text reply' ) weixin.register('help', print_help) It is also accessible as a decorator:: @weixin.register('help') def print_help(*args, **kwargs): username = kwargs.get('sender') sender = kwargs.get('receiver') return weixin.reply( username, sender=sender, content='text reply' )
[ "Register", "a", "command", "helper", "function", "." ]
train
https://github.com/lepture/flask-weixin/blob/abf0b507d41b9780257507aa78fc0817fdc75719/flask_weixin.py#L260-L292
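Beyond the keyed registration shown in the docstring, passing only keyword filters stores the handler with a frozenset limitation that is later matched by subset against the parsed message. A sketch, assuming a Weixin instance named weixin:

# match any incoming image message, whatever its content
@weixin.register(type='image')
def on_image(**kwargs):
    return weixin.reply(
        kwargs['sender'],
        sender=kwargs['receiver'],
        content='got your picture',
    )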
lepture/flask-weixin
flask_weixin.py
Weixin.view_func
def view_func(self): """Default view function for Flask app. This is a simple implementation for view func, you can add it to your Flask app:: weixin = Weixin(app) app.add_url_rule('/', view_func=weixin.view_func) """ if request is None: raise RuntimeError('view_func need Flask be installed') signature = request.args.get('signature') timestamp = request.args.get('timestamp') nonce = request.args.get('nonce') if not self.validate(signature, timestamp, nonce): return 'signature failed', 400 if request.method == 'GET': echostr = request.args.get('echostr', '') return echostr try: ret = self.parse(request.data) except ValueError: return 'invalid', 400 if 'type' not in ret: # not a valid message return 'invalid', 400 if ret['type'] == 'text' and ret['content'] in self._registry: func = self._registry[ret['content']] else: ret_set = frozenset(ret.items()) matched_rules = ( _func for _func, _limitation in self._registry_without_key if _limitation.issubset(ret_set)) func = next(matched_rules, None) # first matched rule if func is None: if '*' in self._registry: func = self._registry['*'] else: func = 'failed' if callable(func): text = func(**ret) else: # plain text text = self.reply( username=ret['sender'], sender=ret['receiver'], content=func, ) return Response(text, content_type='text/xml; charset=utf-8')
python
def view_func(self): """Default view function for Flask app. This is a simple implementation for view func, you can add it to your Flask app:: weixin = Weixin(app) app.add_url_rule('/', view_func=weixin.view_func) """ if request is None: raise RuntimeError('view_func need Flask be installed') signature = request.args.get('signature') timestamp = request.args.get('timestamp') nonce = request.args.get('nonce') if not self.validate(signature, timestamp, nonce): return 'signature failed', 400 if request.method == 'GET': echostr = request.args.get('echostr', '') return echostr try: ret = self.parse(request.data) except ValueError: return 'invalid', 400 if 'type' not in ret: # not a valid message return 'invalid', 400 if ret['type'] == 'text' and ret['content'] in self._registry: func = self._registry[ret['content']] else: ret_set = frozenset(ret.items()) matched_rules = ( _func for _func, _limitation in self._registry_without_key if _limitation.issubset(ret_set)) func = next(matched_rules, None) # first matched rule if func is None: if '*' in self._registry: func = self._registry['*'] else: func = 'failed' if callable(func): text = func(**ret) else: # plain text text = self.reply( username=ret['sender'], sender=ret['receiver'], content=func, ) return Response(text, content_type='text/xml; charset=utf-8')
[ "def", "view_func", "(", "self", ")", ":", "if", "request", "is", "None", ":", "raise", "RuntimeError", "(", "'view_func need Flask be installed'", ")", "signature", "=", "request", ".", "args", ".", "get", "(", "'signature'", ")", "timestamp", "=", "request", ".", "args", ".", "get", "(", "'timestamp'", ")", "nonce", "=", "request", ".", "args", ".", "get", "(", "'nonce'", ")", "if", "not", "self", ".", "validate", "(", "signature", ",", "timestamp", ",", "nonce", ")", ":", "return", "'signature failed'", ",", "400", "if", "request", ".", "method", "==", "'GET'", ":", "echostr", "=", "request", ".", "args", ".", "get", "(", "'echostr'", ",", "''", ")", "return", "echostr", "try", ":", "ret", "=", "self", ".", "parse", "(", "request", ".", "data", ")", "except", "ValueError", ":", "return", "'invalid'", ",", "400", "if", "'type'", "not", "in", "ret", ":", "# not a valid message", "return", "'invalid'", ",", "400", "if", "ret", "[", "'type'", "]", "==", "'text'", "and", "ret", "[", "'content'", "]", "in", "self", ".", "_registry", ":", "func", "=", "self", ".", "_registry", "[", "ret", "[", "'content'", "]", "]", "else", ":", "ret_set", "=", "frozenset", "(", "ret", ".", "items", "(", ")", ")", "matched_rules", "=", "(", "_func", "for", "_func", ",", "_limitation", "in", "self", ".", "_registry_without_key", "if", "_limitation", ".", "issubset", "(", "ret_set", ")", ")", "func", "=", "next", "(", "matched_rules", ",", "None", ")", "# first matched rule", "if", "func", "is", "None", ":", "if", "'*'", "in", "self", ".", "_registry", ":", "func", "=", "self", ".", "_registry", "[", "'*'", "]", "else", ":", "func", "=", "'failed'", "if", "callable", "(", "func", ")", ":", "text", "=", "func", "(", "*", "*", "ret", ")", "else", ":", "# plain text", "text", "=", "self", ".", "reply", "(", "username", "=", "ret", "[", "'sender'", "]", ",", "sender", "=", "ret", "[", "'receiver'", "]", ",", "content", "=", "func", ",", ")", "return", "Response", "(", "text", ",", "content_type", "=", "'text/xml; charset=utf-8'", ")" ]
Default view function for Flask app. This is a simple implementation for view func, you can add it to your Flask app:: weixin = Weixin(app) app.add_url_rule('/', view_func=weixin.view_func)
[ "Default", "view", "function", "for", "Flask", "app", "." ]
train
https://github.com/lepture/flask-weixin/blob/abf0b507d41b9780257507aa78fc0817fdc75719/flask_weixin.py#L313-L369
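A wiring sketch following the docstring; the same view serves the GET echostr handshake and POSTed messages, so both methods are routed to it. It assumes the extension reads WEIXIN_TOKEN from the Flask config (the token value is hypothetical):

from flask import Flask
from flask_weixin import Weixin

app = Flask(__name__)
app.config['WEIXIN_TOKEN'] = 'my-weixin-token'  # hypothetical
weixin = Weixin(app)

# one endpoint handles the GET handshake and POSTed messages
app.add_url_rule('/', view_func=weixin.view_func, methods=['GET', 'POST'])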
cuducos/getgist
getgist/__main__.py
run_getgist
def run_getgist(filename, user, **kwargs): """Passes user inputs to GetGist() and calls get()""" assume_yes = kwargs.get("yes_to_all") getgist = GetGist(user=user, filename=filename, assume_yes=assume_yes) getgist.get()
python
def run_getgist(filename, user, **kwargs): """Passes user inputs to GetGist() and calls get()""" assume_yes = kwargs.get("yes_to_all") getgist = GetGist(user=user, filename=filename, assume_yes=assume_yes) getgist.get()
[ "def", "run_getgist", "(", "filename", ",", "user", ",", "*", "*", "kwargs", ")", ":", "assume_yes", "=", "kwargs", ".", "get", "(", "\"yes_to_all\"", ")", "getgist", "=", "GetGist", "(", "user", "=", "user", ",", "filename", "=", "filename", ",", "assume_yes", "=", "assume_yes", ")", "getgist", ".", "get", "(", ")" ]
Passes user inputs to GetGist() and calls get()
[ "Passes", "user", "inputs", "to", "GetGist", "()", "and", "calls", "get", "()" ]
train
https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/__main__.py#L107-L111
cuducos/getgist
getgist/__main__.py
run_getmy
def run_getmy(filename, **kwargs): """Shortcut for run_getgist() reading username from env var""" assume_yes = kwargs.get("yes_to_all") user = getenv("GETGIST_USER") getgist = GetGist(user=user, filename=filename, assume_yes=assume_yes) getgist.get()
python
def run_getmy(filename, **kwargs): """Shortcut for run_getgist() reading username from env var""" assume_yes = kwargs.get("yes_to_all") user = getenv("GETGIST_USER") getgist = GetGist(user=user, filename=filename, assume_yes=assume_yes) getgist.get()
[ "def", "run_getmy", "(", "filename", ",", "*", "*", "kwargs", ")", ":", "assume_yes", "=", "kwargs", ".", "get", "(", "\"yes_to_all\"", ")", "user", "=", "getenv", "(", "\"GETGIST_USER\"", ")", "getgist", "=", "GetGist", "(", "user", "=", "user", ",", "filename", "=", "filename", ",", "assume_yes", "=", "assume_yes", ")", "getgist", ".", "get", "(", ")" ]
Shortcut for run_getgist() reading username from env var
[ "Shortcut", "for", "run_getgist", "()", "reading", "username", "from", "env", "var" ]
train
https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/__main__.py#L117-L122
cuducos/getgist
getgist/__main__.py
run_putgist
def run_putgist(filename, user, **kwargs): """Passes user inputs to GetGist() and calls put()""" assume_yes = kwargs.get("yes_to_all") private = kwargs.get("private") getgist = GetGist( user=user, filename=filename, assume_yes=assume_yes, create_private=private, allow_none=True, ) getgist.put()
python
def run_putgist(filename, user, **kwargs): """Passes user inputs to GetGist() and calls put()""" assume_yes = kwargs.get("yes_to_all") private = kwargs.get("private") getgist = GetGist( user=user, filename=filename, assume_yes=assume_yes, create_private=private, allow_none=True, ) getgist.put()
[ "def", "run_putgist", "(", "filename", ",", "user", ",", "*", "*", "kwargs", ")", ":", "assume_yes", "=", "kwargs", ".", "get", "(", "\"yes_to_all\"", ")", "private", "=", "kwargs", ".", "get", "(", "\"private\"", ")", "getgist", "=", "GetGist", "(", "user", "=", "user", ",", "filename", "=", "filename", ",", "assume_yes", "=", "assume_yes", ",", "create_private", "=", "private", ",", "allow_none", "=", "True", ",", ")", "getgist", ".", "put", "(", ")" ]
Passes user inputs to GetGist() and calls put()
[ "Passes", "user", "inputs", "to", "GetGist", "()", "and", "calls", "put", "()" ]
train
https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/__main__.py#L130-L141
cuducos/getgist
getgist/__main__.py
GetGist.get
def get(self): """Reads the remote file from Gist and saves it locally""" if self.gist: content = self.github.read_gist_file(self.gist) self.local.save(content)
python
def get(self): """Reads the remote file from Gist and saves it locally""" if self.gist: content = self.github.read_gist_file(self.gist) self.local.save(content)
[ "def", "get", "(", "self", ")", ":", "if", "self", ".", "gist", ":", "content", "=", "self", ".", "github", ".", "read_gist_file", "(", "self", ".", "gist", ")", "self", ".", "local", ".", "save", "(", "content", ")" ]
Reads the remote file from Gist and saves it locally
[ "Reads", "the", "remote", "file", "from", "Gist", "and", "save", "it", "locally" ]
train
https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/__main__.py#L88-L92
cuducos/getgist
getgist/__main__.py
GetGist.put
def put(self): """ Reads local file & updates the remote gist (or creates a new one)""" content = self.local.read() if self.gist: self.github.update(self.gist, content) else: self.github.create(content, public=self.public)
python
def put(self): """ Reads local file & updates the remote gist (or creates a new one)""" content = self.local.read() if self.gist: self.github.update(self.gist, content) else: self.github.create(content, public=self.public)
[ "def", "put", "(", "self", ")", ":", "content", "=", "self", ".", "local", ".", "read", "(", ")", "if", "self", ".", "gist", ":", "self", ".", "github", ".", "update", "(", "self", ".", "gist", ",", "content", ")", "else", ":", "self", ".", "github", ".", "create", "(", "content", ",", "public", "=", "self", ".", "public", ")" ]
Reads local file & updates the remote gist (or creates a new one)
[ "Reads", "local", "file", "&", "update", "the", "remote", "gist", "(", "or", "create", "a", "new", "one", ")" ]
train
https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/__main__.py#L94-L100
cuducos/getgist
getgist/github.py
oauth_only
def oauth_only(function): """Decorator to restrict some GitHubTools methods to run only with OAuth""" def check_for_oauth(self, *args, **kwargs): """ Returns False if GitHubTools instance is not authenticated, or returns the decorated function if it is. """ if not self.is_authenticated: self.oops("To use putgist you have to set your GETGIST_TOKEN") self.oops("(see `putgist --help` for details)") return False return function(self, *args, **kwargs) return check_for_oauth
python
def oauth_only(function): """Decorator to restrict some GitHubTools methods to run only with OAuth""" def check_for_oauth(self, *args, **kwargs): """ Returns False if GitHubTools instance is not authenticated, or returns the decorated function if it is. """ if not self.is_authenticated: self.oops("To use putgist you have to set your GETGIST_TOKEN") self.oops("(see `putgist --help` for details)") return False return function(self, *args, **kwargs) return check_for_oauth
[ "def", "oauth_only", "(", "function", ")", ":", "def", "check_for_oauth", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"\n Returns False if GitHubTools instance is not authenticated, or return\n the decorated fucntion if it is.\n \"\"\"", "if", "not", "self", ".", "is_authenticated", ":", "self", ".", "oops", "(", "\"To use putgist you have to set your GETGIST_TOKEN\"", ")", "self", ".", "oops", "(", "\"(see `putgist --help` for details)\"", ")", "return", "False", "return", "function", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "check_for_oauth" ]
Decorator to restrict some GitHubTools methods to run only with OAuth
[ "Decorator", "to", "restrict", "some", "GitHubTools", "methods", "to", "run", "only", "with", "OAuth" ]
train
https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/github.py#L11-L25
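A minimal sketch of how such a guard decorator wraps a method; the tiny class below is an illustrative stand-in, not the real GitHubTools:

class FakeTools:
    """Illustrative stand-in for GitHubTools."""
    is_authenticated = False

    def oops(self, message):
        print(message)

    @oauth_only
    def update_something(self):  # hypothetical guarded method
        return 'updated'

# prints the two warnings and returns False while unauthenticated
print(FakeTools().update_something())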
cuducos/getgist
getgist/github.py
GitHubTools.add_oauth_header
def add_oauth_header(self): """ Validate token and add the proper header for further requests. :return: (None) """ # abort if no token oauth_token = self._get_token() if not oauth_token: return # add oauth header & reach the api self.headers["Authorization"] = "token " + oauth_token url = self._api_url("user") raw_resp = self.requests.get(url) resp = raw_resp.json() # abort & remove header if token is invalid if resp.get("login", None) != self.user: self.oops("Invalid token for user " + self.user) self.headers.pop("Authorization") return self.is_authenticated = True self.yeah("User {} authenticated".format(self.user))
python
def add_oauth_header(self): """ Validate token and add the proper header for further requests. :return: (None) """ # abort if no token oauth_token = self._get_token() if not oauth_token: return # add oauth header & reach the api self.headers["Authorization"] = "token " + oauth_token url = self._api_url("user") raw_resp = self.requests.get(url) resp = raw_resp.json() # abort & remove header if token is invalid if resp.get("login", None) != self.user: self.oops("Invalid token for user " + self.user) self.headers.pop("Authorization") return self.is_authenticated = True self.yeah("User {} authenticated".format(self.user))
[ "def", "add_oauth_header", "(", "self", ")", ":", "# abort if no token", "oauth_token", "=", "self", ".", "_get_token", "(", ")", "if", "not", "oauth_token", ":", "return", "# add oauth header & reach the api", "self", ".", "headers", "[", "\"Authorization\"", "]", "=", "\"token \"", "+", "oauth_token", "url", "=", "self", ".", "_api_url", "(", "\"user\"", ")", "raw_resp", "=", "self", ".", "requests", ".", "get", "(", "url", ")", "resp", "=", "raw_resp", ".", "json", "(", ")", "# abort & remove header if token is invalid", "if", "resp", ".", "get", "(", "\"login\"", ",", "None", ")", "!=", "self", ".", "user", ":", "self", ".", "oops", "(", "\"Invalid token for user \"", "+", "self", ".", "user", ")", "self", ".", "headers", ".", "pop", "(", "\"Authorization\"", ")", "return", "self", ".", "is_authenticated", "=", "True", "self", ".", "yeah", "(", "\"User {} authenticated\"", ".", "format", "(", "self", ".", "user", ")", ")" ]
Validate token and add the proper header for further requests. :return: (None)
[ "Validate", "token", "and", "add", "the", "proper", "header", "for", "further", "requests", ".", ":", "return", ":", "(", "None", ")" ]
train
https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/github.py#L56-L79
cuducos/getgist
getgist/github.py
GitHubTools.get_gists
def get_gists(self): """ List generator containing relevant gist information such as id, description, filenames and raw URL (dict). """ # fetch all gists if self.is_authenticated: url = self._api_url("gists") else: url = self._api_url("users", self.user, "gists") self.output("Fetching " + url) raw_resp = self.requests.get(url) # abort if user not found if raw_resp.status_code != 200: self.oops("User `{}` not found".format(self.user)) return # abort if there are no gists resp = raw_resp.json() if not resp: self.oops("No gists found for user `{}`".format(self.user)) return # parse response for gist in raw_resp.json(): yield self._parse_gist(gist)
python
def get_gists(self): """ List generator containing relevant gist information such as id, description, filenames and raw URL (dict). """ # fetch all gists if self.is_authenticated: url = self._api_url("gists") else: url = self._api_url("users", self.user, "gists") self.output("Fetching " + url) raw_resp = self.requests.get(url) # abort if user not found if raw_resp.status_code != 200: self.oops("User `{}` not found".format(self.user)) return # abort if there are no gists resp = raw_resp.json() if not resp: self.oops("No gists found for user `{}`".format(self.user)) return # parse response for gist in raw_resp.json(): yield self._parse_gist(gist)
[ "def", "get_gists", "(", "self", ")", ":", "# fetch all gists", "if", "self", ".", "is_authenticated", ":", "url", "=", "self", ".", "_api_url", "(", "\"gists\"", ")", "else", ":", "url", "=", "self", ".", "_api_url", "(", "\"users\"", ",", "self", ".", "user", ",", "\"gists\"", ")", "self", ".", "output", "(", "\"Fetching \"", "+", "url", ")", "raw_resp", "=", "self", ".", "requests", ".", "get", "(", "url", ")", "# abort if user not found", "if", "raw_resp", ".", "status_code", "!=", "200", ":", "self", ".", "oops", "(", "\"User `{}` not found\"", ".", "format", "(", "self", ".", "user", ")", ")", "return", "# abort if there are no gists", "resp", "=", "raw_resp", ".", "json", "(", ")", "if", "not", "resp", ":", "self", ".", "oops", "(", "\"No gists found for user `{}`\"", ".", "format", "(", "self", ".", "user", ")", ")", "return", "# parse response", "for", "gist", "in", "raw_resp", ".", "json", "(", ")", ":", "yield", "self", ".", "_parse_gist", "(", "gist", ")" ]
List generator containing relevant gist information such as id, description, filenames and raw URL (dict).
[ "List", "generator", "containing", "gist", "relevant", "information", "such", "as", "id", "description", "filenames", "and", "raw", "URL", "(", "dict", ")", "." ]
train
https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/github.py#L81-L107
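Consuming the generator is straightforward; each yielded dict carries the keys produced by _parse_gist() (shown further below). The instance name is illustrative:

# assuming `tools` is a configured GitHubTools instance
for gist in tools.get_gists():
    print(gist['id'], gist['description'])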
cuducos/getgist
getgist/github.py
GitHubTools.select_gist
def select_gist(self, allow_none=False): """ Given the requested filename, it selects the proper gist; if more than one gist is found with the given filename, the user is asked to choose. :allow_none: (bool) for `getgist` it should raise an error if no gist is found, but setting this argument to True avoids this error, which is useful when `putgist` is calling this method :return: (dict) selected gist """ # pick up all matching gists matches = list() for gist in self.get_gists(): for gist_file in gist.get("files"): if self.filename == gist_file.get("filename"): matches.append(gist) # abort if no match is found if not matches: if allow_none: return None else: msg = "No file named `{}` found in {}'s gists" self.oops(msg.format(self.file_path, self.user)) if not self.is_authenticated: self.warn("To access private gists set the GETGIST_TOKEN") self.warn("(see `getgist --help` for details)") return False # return if there is only one match if len(matches) == 1 or self.assume_yes: return matches.pop(0) return self._ask_which_gist(matches)
python
def select_gist(self, allow_none=False): """ Given the requested filename, it selects the proper gist; if more than one gist is found with the given filename, the user is asked to choose. :allow_none: (bool) for `getgist` it should raise an error if no gist is found, but setting this argument to True avoids this error, which is useful when `putgist` is calling this method :return: (dict) selected gist """ # pick up all matching gists matches = list() for gist in self.get_gists(): for gist_file in gist.get("files"): if self.filename == gist_file.get("filename"): matches.append(gist) # abort if no match is found if not matches: if allow_none: return None else: msg = "No file named `{}` found in {}'s gists" self.oops(msg.format(self.file_path, self.user)) if not self.is_authenticated: self.warn("To access private gists set the GETGIST_TOKEN") self.warn("(see `getgist --help` for details)") return False # return if there is only one match if len(matches) == 1 or self.assume_yes: return matches.pop(0) return self._ask_which_gist(matches)
[ "def", "select_gist", "(", "self", ",", "allow_none", "=", "False", ")", ":", "# pick up all macthing gists", "matches", "=", "list", "(", ")", "for", "gist", "in", "self", ".", "get_gists", "(", ")", ":", "for", "gist_file", "in", "gist", ".", "get", "(", "\"files\"", ")", ":", "if", "self", ".", "filename", "==", "gist_file", ".", "get", "(", "\"filename\"", ")", ":", "matches", ".", "append", "(", "gist", ")", "# abort if no match is found", "if", "not", "matches", ":", "if", "allow_none", ":", "return", "None", "else", ":", "msg", "=", "\"No file named `{}` found in {}'s gists\"", "self", ".", "oops", "(", "msg", ".", "format", "(", "self", ".", "file_path", ",", "self", ".", "user", ")", ")", "if", "not", "self", ".", "is_authenticated", ":", "self", ".", "warn", "(", "\"To access private gists set the GETGIST_TOKEN\"", ")", "self", ".", "warn", "(", "\"(see `getgist --help` for details)\"", ")", "return", "False", "# return if there's is only one match", "if", "len", "(", "matches", ")", "==", "1", "or", "self", ".", "assume_yes", ":", "return", "matches", ".", "pop", "(", "0", ")", "return", "self", ".", "_ask_which_gist", "(", "matches", ")" ]
Given the requested filename, it selects the proper gist; if more than one gist is found with the given filename, the user is asked to choose. :allow_none: (bool) for `getgist` it should raise an error if no gist is found, but setting this argument to True avoids this error, which is useful when `putgist` is calling this method :return: (dict) selected gist
[ "Given", "the", "requested", "filename", "it", "selects", "the", "proper", "gist", ";", "if", "more", "than", "one", "gist", "is", "found", "with", "the", "given", "filename", "user", "is", "asked", "to", "choose", ".", ":", "allow_none", ":", "(", "bool", ")", "for", "getgist", "it", "should", "raise", "error", "if", "no", "gist", "is", "found", "but", "setting", "this", "argument", "to", "True", "avoid", "this", "error", "which", "is", "useful", "when", "putgist", "is", "calling", "this", "method", ":", "return", ":", "(", "dict", ")", "selected", "gist" ]
train
https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/github.py#L109-L141
cuducos/getgist
getgist/github.py
GitHubTools.read_gist_file
def read_gist_file(self, gist): """ Returns the contents of a file hosted inside a gist at GitHub. :param gist: (dict) gist parsed by GitHubTools._parse() :return: (bytes) content of a gist loaded from GitHub """ url = False files = gist.get("files") for gist_file in files: if gist_file.get("filename") == self.filename: url = gist_file.get("raw_url") break if url: self.output("Reading {}".format(url)) response = self.requests.get(url) return response.content
python
def read_gist_file(self, gist): """ Returns the contents of a file hosted inside a gist at GitHub. :param gist: (dict) gist parsed by GitHubTools._parse() :return: (bytes) content of a gist loaded from GitHub """ url = False files = gist.get("files") for gist_file in files: if gist_file.get("filename") == self.filename: url = gist_file.get("raw_url") break if url: self.output("Reading {}".format(url)) response = self.requests.get(url) return response.content
[ "def", "read_gist_file", "(", "self", ",", "gist", ")", ":", "url", "=", "False", "files", "=", "gist", ".", "get", "(", "\"files\"", ")", "for", "gist_file", "in", "files", ":", "if", "gist_file", ".", "get", "(", "\"filename\"", ")", "==", "self", ".", "filename", ":", "url", "=", "gist_file", ".", "get", "(", "\"raw_url\"", ")", "break", "if", "url", ":", "self", ".", "output", "(", "\"Reading {}\"", ".", "format", "(", "url", ")", ")", "response", "=", "self", ".", "requests", ".", "get", "(", "url", ")", "return", "response", ".", "content" ]
Returns the contents of a file hosted inside a gist at GitHub. :param gist: (dict) gist parsed by GitHubTools._parse() :return: (bytes) content of a gist loaded from GitHub
[ "Returns", "the", "contents", "of", "file", "hosted", "inside", "a", "gist", "at", "GitHub", ".", ":", "param", "gist", ":", "(", "dict", ")", "gist", "parsed", "by", "GitHubTools", ".", "_parse", "()", ":", "return", ":", "(", "bytes", ")", "content", "of", "a", "gist", "loaded", "from", "GitHub" ]
train
https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/github.py#L143-L158
cuducos/getgist
getgist/github.py
GitHubTools.update
def update(self, gist, content): """ Updates the contents of a file hosted inside a gist at GitHub. :param gist: (dict) gist parsed by GitHubTools._parse_gist() :param content: (str or bytes) to be written :return: (bool) indicating the success or failure of the update """ # abort if content is False if content is False: return False # request url = self._api_url("gists", gist.get("id")) data = {"files": {self.filename: {"content": content}}} self.output("Sending contents of {} to {}".format(self.file_path, url)) response = self.requests.patch(url, data=dumps(data)) # error if response.status_code != 200: self.oops("Could not update " + gist.get("description")) self.oops("PATCH request returned " + str(response.status_code)) return False # success self.yeah("Done!") self.hey("The URL to this Gist is: {}".format(gist["url"])) return True
python
def update(self, gist, content): """ Updates the contents of a file hosted inside a gist at GitHub. :param gist: (dict) gist parsed by GitHubTools._parse_gist() :param content: (str or bytes) to be written :return: (bool) indicating the success or failure of the update """ # abort if content is False if content is False: return False # request url = self._api_url("gists", gist.get("id")) data = {"files": {self.filename: {"content": content}}} self.output("Sending contents of {} to {}".format(self.file_path, url)) response = self.requests.patch(url, data=dumps(data)) # error if response.status_code != 200: self.oops("Could not update " + gist.get("description")) self.oops("PATCH request returned " + str(response.status_code)) return False # success self.yeah("Done!") self.hey("The URL to this Gist is: {}".format(gist["url"])) return True
[ "def", "update", "(", "self", ",", "gist", ",", "content", ")", ":", "# abort if content is False", "if", "content", "is", "False", ":", "return", "False", "# request", "url", "=", "self", ".", "_api_url", "(", "\"gists\"", ",", "gist", ".", "get", "(", "\"id\"", ")", ")", "data", "=", "{", "\"files\"", ":", "{", "self", ".", "filename", ":", "{", "\"content\"", ":", "content", "}", "}", "}", "self", ".", "output", "(", "\"Sending contents of {} to {}\"", ".", "format", "(", "self", ".", "file_path", ",", "url", ")", ")", "response", "=", "self", ".", "requests", ".", "patch", "(", "url", ",", "data", "=", "dumps", "(", "data", ")", ")", "# error", "if", "response", ".", "status_code", "!=", "200", ":", "self", ".", "oops", "(", "\"Could not update \"", "+", "gist", ".", "get", "(", "\"description\"", ")", ")", "self", ".", "oops", "(", "\"PATCH request returned \"", "+", "str", "(", "response", ".", "status_code", ")", ")", "return", "False", "# success", "self", ".", "yeah", "(", "\"Done!\"", ")", "self", ".", "hey", "(", "\"The URL to this Gist is: {}\"", ".", "format", "(", "gist", "[", "\"url\"", "]", ")", ")", "return", "True" ]
Updates the contents of a file hosted inside a gist at GitHub. :param gist: (dict) gist parsed by GitHubTools._parse_gist() :param content: (str or bytes) to be written :return: (bool) indicating the success or failure of the update
[ "Updates", "the", "contents", "of", "file", "hosted", "inside", "a", "gist", "at", "GitHub", ".", ":", "param", "gist", ":", "(", "dict", ")", "gist", "parsed", "by", "GitHubTools", ".", "_parse_gist", "()", ":", "param", "content", ":", "(", "str", "or", "bytes", ")", "to", "be", "written", ":", "return", ":", "(", "bool", ")", "indicatind", "the", "success", "or", "failure", "of", "the", "update" ]
train
https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/github.py#L161-L187
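For reference, the PATCH body update() sends has this shape; the filename and content below are illustrative:

from json import dumps

filename = 'notes.md'  # hypothetical
payload = dumps({
    'files': {
        filename: {'content': '# updated contents'},
    },
})
# sent as PATCH https://api.github.com/gists/<gist id>; 200 means updated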
cuducos/getgist
getgist/github.py
GitHubTools.create
def create(self, content, **kwargs): """ Create a new gist. :param gist: (dict) gist parsed by GitHubTools._parse() :param content: (str or bytes) to be written :param public: (bool) defines if the gist is public or private :return: (bool) indicating the success or failure of the creation """ # abort if content is False if content is False: return False # set new gist public = bool(kwargs.get("public", True)) data = { "description": self.filename, "public": public, "files": {self.filename: {"content": content}}, } # send request url = self._api_url("gists") self.output("Sending contents of {} to {}".format(self.file_path, url)) response = self.requests.post(url, data=dumps(data)) # error if response.status_code != 201: self.oops("Could not create " + self.filename) self.oops("POST request returned " + str(response.status_code)) return False # parse created gist gist = self._parse_gist(response.json()) # success self.yeah("Done!") self.hey("The URL to this Gist is: {}".format(gist["url"])) return True
python
def create(self, content, **kwargs): """ Create a new gist. :param gist: (dict) gist parsed by GitHubTools._parse() :param content: (str or bytes) to be written :param public: (bool) defines if the gist is public or private :return: (bool) indicating the success or failure of the creation """ # abort if content is False if content is False: return False # set new gist public = bool(kwargs.get("public", True)) data = { "description": self.filename, "public": public, "files": {self.filename: {"content": content}}, } # send request url = self._api_url("gists") self.output("Sending contents of {} to {}".format(self.file_path, url)) response = self.requests.post(url, data=dumps(data)) # error if response.status_code != 201: self.oops("Could not create " + self.filename) self.oops("POST request returned " + str(response.status_code)) return False # parse created gist gist = self._parse_gist(response.json()) # success self.yeah("Done!") self.hey("The URL to this Gist is: {}".format(gist["url"])) return True
[ "def", "create", "(", "self", ",", "content", ",", "*", "*", "kwargs", ")", ":", "# abort if content is False", "if", "content", "is", "False", ":", "return", "False", "# set new gist", "public", "=", "bool", "(", "kwargs", ".", "get", "(", "\"public\"", ",", "True", ")", ")", "data", "=", "{", "\"description\"", ":", "self", ".", "filename", ",", "\"public\"", ":", "public", ",", "\"files\"", ":", "{", "self", ".", "filename", ":", "{", "\"content\"", ":", "content", "}", "}", ",", "}", "# send request", "url", "=", "self", ".", "_api_url", "(", "\"gists\"", ")", "self", ".", "output", "(", "\"Sending contents of {} to {}\"", ".", "format", "(", "self", ".", "file_path", ",", "url", ")", ")", "response", "=", "self", ".", "requests", ".", "post", "(", "url", ",", "data", "=", "dumps", "(", "data", ")", ")", "# error", "if", "response", ".", "status_code", "!=", "201", ":", "self", ".", "oops", "(", "\"Could not create \"", "+", "self", ".", "filename", ")", "self", ".", "oops", "(", "\"POST request returned \"", "+", "str", "(", "response", ".", "status_code", ")", ")", "return", "False", "# parse created gist", "gist", "=", "self", ".", "_parse_gist", "(", "response", ".", "json", "(", ")", ")", "# success", "self", ".", "yeah", "(", "\"Done!\"", ")", "self", ".", "hey", "(", "\"The URL to this Gist is: {}\"", ".", "format", "(", "gist", "[", "\"url\"", "]", ")", ")", "return", "True" ]
Create a new gist. :param gist: (dict) gist parsed by GitHubTools._parse() :param content: (str or bytes) to be written :param public: (bool) defines if the gist is public or private :return: (bool) indicating the success or failure of the creation
[ "Create", "a", "new", "gist", ".", ":", "param", "gist", ":", "(", "dict", ")", "gist", "parsed", "by", "GitHubTools", ".", "_parse", "()", ":", "param", "content", ":", "(", "str", "or", "bytes", ")", "to", "be", "written", ":", "param", "public", ":", "(", "bool", ")", "defines", "if", "the", "gist", "is", "public", "or", "private", ":", "return", ":", "(", "bool", ")", "indicatind", "the", "success", "or", "failure", "of", "the", "creation" ]
train
https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/github.py#L190-L227
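And the POST body create() sends, again with illustrative values; note how kwargs.get("public", True) drives the visibility flag:

from json import dumps

filename = 'notes.md'  # hypothetical
payload = dumps({
    'description': filename,
    'public': False,  # create(content, public=False) makes a private gist
    'files': {filename: {'content': '# new gist'}},
})
# sent as POST https://api.github.com/gists; 201 means created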
cuducos/getgist
getgist/github.py
GitHubTools._ask_which_gist
def _ask_which_gist(self, matches): """ Asks the user which gist to use in case of more than one gist matching the instance filename. :param matches: (list) of dictionaries generated within select_gist() :return: (dict) of the selected gist """ # ask user which gist to use self.hey("Use {} from which gist?".format(self.filename)) for count, gist in enumerate(matches, 1): self.hey("[{}] {}".format(count, gist.get("description"))) # get the gist index selected = False while not selected: gist_index = prompt("Type the number: ", type=int) - 1 try: selected = matches[gist_index] except IndexError: self.oops("Invalid number, please try again.") self.output("Using `{}` Gist".format(selected["description"])) return selected
python
def _ask_which_gist(self, matches): """ Asks the user which gist to use in case of more than one gist matching the instance filename. :param matches: (list) of dictionaries generated within select_gist() :return: (dict) of the selected gist """ # ask user which gist to use self.hey("Use {} from which gist?".format(self.filename)) for count, gist in enumerate(matches, 1): self.hey("[{}] {}".format(count, gist.get("description"))) # get the gist index selected = False while not selected: gist_index = prompt("Type the number: ", type=int) - 1 try: selected = matches[gist_index] except IndexError: self.oops("Invalid number, please try again.") self.output("Using `{}` Gist".format(selected["description"])) return selected
[ "def", "_ask_which_gist", "(", "self", ",", "matches", ")", ":", "# ask user which gist to use", "self", ".", "hey", "(", "\"Use {} from which gist?\"", ".", "format", "(", "self", ".", "filename", ")", ")", "for", "count", ",", "gist", "in", "enumerate", "(", "matches", ",", "1", ")", ":", "self", ".", "hey", "(", "\"[{}] {}\"", ".", "format", "(", "count", ",", "gist", ".", "get", "(", "\"description\"", ")", ")", ")", "# get the gist index", "selected", "=", "False", "while", "not", "selected", ":", "gist_index", "=", "prompt", "(", "\"Type the number: \"", ",", "type", "=", "int", ")", "-", "1", "try", ":", "selected", "=", "matches", "[", "gist_index", "]", "except", "IndexError", ":", "self", ".", "oops", "(", "\"Invalid number, please try again.\"", ")", "self", ".", "output", "(", "\"Using `{}` Gist\"", ".", "format", "(", "selected", "[", "\"description\"", "]", ")", ")", "return", "selected" ]
Asks the user which gist to use in case of more than one gist matching the instance filename. :param matches: (list) of dictionaries generated within select_gist() :return: (dict) of the selected gist
[ "Asks", "user", "which", "gist", "to", "use", "in", "case", "of", "more", "than", "one", "gist", "matching", "the", "instance", "filename", ".", ":", "param", "matches", ":", "(", "list", ")", "of", "dictioaries", "generated", "within", "select_gists", "()", ":", "return", ":", "(", "dict", ")", "of", "the", "selected", "gist" ]
train
https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/github.py#L229-L251
cuducos/getgist
getgist/github.py
GitHubTools._parse_gist
def _parse_gist(gist): """Receive a gist (dict) and parse it to GetGist""" # parse files files = list() file_names = sorted(filename for filename in gist["files"].keys()) for name in file_names: files.append( dict(filename=name, raw_url=gist["files"][name].get("raw_url")) ) # parse description description = gist["description"] if not description: names = sorted(f.get("filename") for f in files) description = names.pop(0) return dict( description=description, id=gist.get("id"), files=files, url=gist.get("html_url"), )
python
def _parse_gist(gist): """Receive a gist (dict) and parse it to GetGist""" # parse files files = list() file_names = sorted(filename for filename in gist["files"].keys()) for name in file_names: files.append( dict(filename=name, raw_url=gist["files"][name].get("raw_url")) ) # parse description description = gist["description"] if not description: names = sorted(f.get("filename") for f in files) description = names.pop(0) return dict( description=description, id=gist.get("id"), files=files, url=gist.get("html_url"), )
[ "def", "_parse_gist", "(", "gist", ")", ":", "# parse files", "files", "=", "list", "(", ")", "file_names", "=", "sorted", "(", "filename", "for", "filename", "in", "gist", "[", "\"files\"", "]", ".", "keys", "(", ")", ")", "for", "name", "in", "file_names", ":", "files", ".", "append", "(", "dict", "(", "filename", "=", "name", ",", "raw_url", "=", "gist", "[", "\"files\"", "]", "[", "name", "]", ".", "get", "(", "\"raw_url\"", ")", ")", ")", "# parse description", "description", "=", "gist", "[", "\"description\"", "]", "if", "not", "description", ":", "names", "=", "sorted", "(", "f", ".", "get", "(", "\"filename\"", ")", "for", "f", "in", "files", ")", "description", "=", "names", ".", "pop", "(", "0", ")", "return", "dict", "(", "description", "=", "description", ",", "id", "=", "gist", ".", "get", "(", "\"id\"", ")", ",", "files", "=", "files", ",", "url", "=", "gist", ".", "get", "(", "\"html_url\"", ")", ",", ")" ]
Receive a gist (dict) and parse it to GetGist
[ "Receive", "a", "gist", "(", "dict", ")", "and", "parse", "it", "to", "GetGist" ]
train
https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/github.py#L258-L280
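A usage sketch for _parse_gist with a fabricated payload shaped like the GitHub API's gist JSON; calling it unbound assumes it is a staticmethod, which its self-less signature suggests:

gist = {
    "id": "abc123",
    "html_url": "https://gist.github.com/abc123",
    "description": "",  # falsy, so the first filename is promoted
    "files": {
        "b.txt": {"raw_url": "https://example.com/raw/b.txt"},
        "a.txt": {"raw_url": "https://example.com/raw/a.txt"},
    },
}
parsed = GitHubTools._parse_gist(gist)
assert [f["filename"] for f in parsed["files"]] == ["a.txt", "b.txt"]  # sorted by name
assert parsed["description"] == "a.txt"  # fallback to the first filename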
cuducos/getgist
getgist/__init__.py
GetGistCommons.indent
def indent(self, message): """ Sets the indent for standardized output :param message: (str) :return: (str) """ indent = self.indent_char * self.indent_size return indent + message
python
def indent(self, message): """ Sets the indent for standardized output :param message: (str) :return: (str) """ indent = self.indent_char * self.indent_size return indent + message
[ "def", "indent", "(", "self", ",", "message", ")", ":", "indent", "=", "self", ".", "indent_char", "*", "self", ".", "indent_size", "return", "indent", "+", "message" ]
Sets the indent for standardized output :param message: (str) :return: (str)
[ "Sets", "the", "indent", "for", "standardized", "output", ":", "param", "message", ":", "(", "str", ")", ":", "return", ":", "(", "str", ")" ]
train
https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/__init__.py#L12-L19
cuducos/getgist
getgist/__init__.py
GetGistCommons.output
def output(self, message, color=None):
    """
    A helper used like print() or click's secho()
    tunneling all the outputs to sys.stdout or sys.stderr
    :param message: (str)
    :param color: (str) check click.secho() documentation
    :return: (None) prints to sys.stdout or sys.stderr
    """
    output_to = stderr if color == "red" else stdout
    secho(self.indent(message), fg=color, file=output_to)
python
def output(self, message, color=None):
    """
    A helper used like print() or click's secho()
    tunneling all the outputs to sys.stdout or sys.stderr
    :param message: (str)
    :param color: (str) check click.secho() documentation
    :return: (None) prints to sys.stdout or sys.stderr
    """
    output_to = stderr if color == "red" else stdout
    secho(self.indent(message), fg=color, file=output_to)
[ "def", "output", "(", "self", ",", "message", ",", "color", "=", "None", ")", ":", "output_to", "=", "stderr", "if", "color", "==", "\"red\"", "else", "stdout", "secho", "(", "self", ".", "indent", "(", "message", ")", ",", "fg", "=", "color", ",", "file", "=", "output_to", ")" ]
A helper used like print() or click's secho() tunneling all the outputs to sys.stdout or sys.stderr :param message: (str) :param color: (str) check click.secho() documentation :return: (None) prints to sys.stdout or sys.stderr
[ "A", "helper", "to", "used", "like", "print", "()", "or", "click", "s", "secho", "()", "tunneling", "all", "the", "outputs", "to", "sys", ".", "stdout", "or", "sys", ".", "stderr", ":", "param", "message", ":", "(", "str", ")", ":", "param", "color", ":", "(", "str", ")", "check", "click", ".", "secho", "()", "documentation", ":", "return", ":", "(", "None", ")", "prints", "to", "sys", ".", "stdout", "or", "sys", ".", "stderr" ]
train
https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/__init__.py#L21-L30
cuducos/getgist
getgist/request.py
GetGistRequests.get
def get(self, url, params=None, **kwargs):
    """Encapsulate requests.get to use this class instance header"""
    return requests.get(url, params=params, headers=self.add_headers(**kwargs))
python
def get(self, url, params=None, **kwargs):
    """Encapsulate requests.get to use this class instance header"""
    return requests.get(url, params=params, headers=self.add_headers(**kwargs))
[ "def", "get", "(", "self", ",", "url", ",", "params", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "requests", ".", "get", "(", "url", ",", "params", "=", "params", ",", "headers", "=", "self", ".", "add_headers", "(", "*", "*", "kwargs", ")", ")" ]
Encapsulate requests.get to use this class instance header
[ "Encapsulte", "requests", ".", "get", "to", "use", "this", "class", "instance", "header" ]
train
https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/request.py#L26-L28
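get above and patch/post in the two records below share one shape: delegate to requests while injecting the instance's headers. add_headers itself is not shown in these records, so the merge below is an assumed minimal version of the pattern, not GetGist's actual implementation:

import requests

class HeaderSession:
    # Hypothetical minimal version of the GetGistRequests pattern
    def __init__(self, headers=None):
        self.headers = headers or {}

    def add_headers(self, **kwargs):
        # Per-call headers win over the instance defaults
        merged = dict(self.headers)
        merged.update(kwargs)
        return merged

    def get(self, url, params=None, **kwargs):
        # Same delegation shape as GetGistRequests.get
        return requests.get(url, params=params, headers=self.add_headers(**kwargs))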
cuducos/getgist
getgist/request.py
GetGistRequests.patch
def patch(self, url, data=None, **kwargs):
    """Encapsulate requests.patch to use this class instance header"""
    return requests.patch(url, data=data, headers=self.add_headers(**kwargs))
python
def patch(self, url, data=None, **kwargs):
    """Encapsulate requests.patch to use this class instance header"""
    return requests.patch(url, data=data, headers=self.add_headers(**kwargs))
[ "def", "patch", "(", "self", ",", "url", ",", "data", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "requests", ".", "patch", "(", "url", ",", "data", "=", "data", ",", "headers", "=", "self", ".", "add_headers", "(", "*", "*", "kwargs", ")", ")" ]
Encapsulate requests.patch to use this class instance header
[ "Encapsulte", "requests", ".", "patch", "to", "use", "this", "class", "instance", "header" ]
train
https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/request.py#L30-L32
cuducos/getgist
getgist/request.py
GetGistRequests.post
def post(self, url, data=None, **kwargs):
    """Encapsulate requests.post to use this class instance header"""
    return requests.post(url, data=data, headers=self.add_headers(**kwargs))
python
def post(self, url, data=None, **kwargs):
    """Encapsulate requests.post to use this class instance header"""
    return requests.post(url, data=data, headers=self.add_headers(**kwargs))
[ "def", "post", "(", "self", ",", "url", ",", "data", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "requests", ".", "post", "(", "url", ",", "data", "=", "data", ",", "headers", "=", "self", ".", "add_headers", "(", "*", "*", "kwargs", ")", ")" ]
Encapsulate requests.post to use this class instance header
[ "Encapsulte", "requests", ".", "post", "to", "use", "this", "class", "instance", "header" ]
train
https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/request.py#L34-L36
cuducos/getgist
getgist/local.py
LocalTools.save
def save(self, content): """ Save any given content to the instance file. :param content: (str or bytes) :return: (None) """ # backup existing file if needed if os.path.exists(self.file_path) and not self.assume_yes: message = "Overwrite existing {}? (y/n) " if not confirm(message.format(self.filename)): self.backup() # write file self.output("Saving " + self.filename) with open(self.file_path, "wb") as handler: if not isinstance(content, bytes): content = bytes(content, "utf-8") handler.write(content) self.yeah("Done!")
python
def save(self, content): """ Save any given content to the instance file. :param content: (str or bytes) :return: (None) """ # backup existing file if needed if os.path.exists(self.file_path) and not self.assume_yes: message = "Overwrite existing {}? (y/n) " if not confirm(message.format(self.filename)): self.backup() # write file self.output("Saving " + self.filename) with open(self.file_path, "wb") as handler: if not isinstance(content, bytes): content = bytes(content, "utf-8") handler.write(content) self.yeah("Done!")
[ "def", "save", "(", "self", ",", "content", ")", ":", "# backup existing file if needed", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "file_path", ")", "and", "not", "self", ".", "assume_yes", ":", "message", "=", "\"Overwrite existing {}? (y/n) \"", "if", "not", "confirm", "(", "message", ".", "format", "(", "self", ".", "filename", ")", ")", ":", "self", ".", "backup", "(", ")", "# write file", "self", ".", "output", "(", "\"Saving \"", "+", "self", ".", "filename", ")", "with", "open", "(", "self", ".", "file_path", ",", "\"wb\"", ")", "as", "handler", ":", "if", "not", "isinstance", "(", "content", ",", "bytes", ")", ":", "content", "=", "bytes", "(", "content", ",", "\"utf-8\"", ")", "handler", ".", "write", "(", "content", ")", "self", ".", "yeah", "(", "\"Done!\"", ")" ]
Save any given content to the instance file. :param content: (str or bytes) :return: (None)
[ "Save", "any", "given", "content", "to", "the", "instance", "file", ".", ":", "param", "content", ":", "(", "str", "or", "bytes", ")", ":", "return", ":", "(", "None", ")" ]
train
https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/local.py#L23-L41
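The bytes coercion at the end of save is easy to miss; a standalone sketch of just that step (to_bytes is a hypothetical helper name, not part of the library):

def to_bytes(content):
    # Mirrors the isinstance check in LocalTools.save: str is encoded
    # as UTF-8, bytes pass through unchanged
    if not isinstance(content, bytes):
        content = bytes(content, "utf-8")
    return content

assert to_bytes("text") == b"text"
assert to_bytes(b"raw") == b"raw"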
cuducos/getgist
getgist/local.py
LocalTools.backup
def backup(self):
    """Backs up files with the same name as the instance filename"""
    count = 0
    name = "{}.bkp".format(self.filename)
    backup = os.path.join(self.cwd, name)
    while os.path.exists(backup):
        count += 1
        name = "{}.bkp{}".format(self.filename, count)
        backup = os.path.join(self.cwd, name)
    self.hey("Moving existing {} to {}".format(self.filename, name))
    os.rename(os.path.join(self.cwd, self.filename), backup)
python
def backup(self):
    """Backs up files with the same name as the instance filename"""
    count = 0
    name = "{}.bkp".format(self.filename)
    backup = os.path.join(self.cwd, name)
    while os.path.exists(backup):
        count += 1
        name = "{}.bkp{}".format(self.filename, count)
        backup = os.path.join(self.cwd, name)
    self.hey("Moving existing {} to {}".format(self.filename, name))
    os.rename(os.path.join(self.cwd, self.filename), backup)
[ "def", "backup", "(", "self", ")", ":", "count", "=", "0", "name", "=", "\"{}.bkp\"", ".", "format", "(", "self", ".", "filename", ")", "backup", "=", "os", ".", "path", ".", "join", "(", "self", ".", "cwd", ",", "name", ")", "while", "os", ".", "path", ".", "exists", "(", "backup", ")", ":", "count", "+=", "1", "name", "=", "\"{}.bkp{}\"", ".", "format", "(", "self", ".", "filename", ",", "count", ")", "backup", "=", "os", ".", "path", ".", "join", "(", "self", ".", "cwd", ",", "name", ")", "self", ".", "hey", "(", "\"Moving existing {} to {}\"", ".", "format", "(", "self", ".", "filename", ",", "name", ")", ")", "os", ".", "rename", "(", "os", ".", "path", ".", "join", "(", "self", ".", "cwd", ",", "self", ".", "filename", ")", ",", "backup", ")" ]
Backs up files with the same name as the instance filename
[ "Backups", "files", "with", "the", "same", "name", "of", "the", "instance", "filename" ]
train
https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/local.py#L43-L53
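backup probes for a free name in the sequence file.bkp, file.bkp1, file.bkp2, and so on. A pure-function version of the same naming rule, with a set standing in for the os.path.exists checks (next_backup_name is hypothetical):

def next_backup_name(filename, existing):
    # Same probing loop as LocalTools.backup, minus the filesystem
    count = 0
    name = "{}.bkp".format(filename)
    while name in existing:
        count += 1
        name = "{}.bkp{}".format(filename, count)
    return name

assert next_backup_name("notes.md", set()) == "notes.md.bkp"
assert next_backup_name("notes.md", {"notes.md.bkp"}) == "notes.md.bkp1"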
cuducos/getgist
getgist/local.py
LocalTools.read
def read(self, file_path=None):
    """
    Read the contents of a file.
    :param file_path: (str) path to a file in the local file system
    :return: (str) contents of the file, or (False) if not found/not file
    """
    if not file_path:
        file_path = self.file_path

    # abort if the file path does not exist
    if not os.path.exists(file_path):
        self.oops("Sorry, but {} does not exist".format(file_path))
        return False

    # abort if the file path is not a file
    if not os.path.isfile(file_path):
        self.oops("Sorry, but {} is not a file".format(file_path))
        return False

    with open(file_path) as handler:
        return handler.read()
python
def read(self, file_path=None):
    """
    Read the contents of a file.
    :param file_path: (str) path to a file in the local file system
    :return: (str) contents of the file, or (False) if not found/not file
    """
    if not file_path:
        file_path = self.file_path

    # abort if the file path does not exist
    if not os.path.exists(file_path):
        self.oops("Sorry, but {} does not exist".format(file_path))
        return False

    # abort if the file path is not a file
    if not os.path.isfile(file_path):
        self.oops("Sorry, but {} is not a file".format(file_path))
        return False

    with open(file_path) as handler:
        return handler.read()
[ "def", "read", "(", "self", ",", "file_path", "=", "None", ")", ":", "if", "not", "file_path", ":", "file_path", "=", "self", ".", "file_path", "# abort if the file path does not exist", "if", "not", "os", ".", "path", ".", "exists", "(", "file_path", ")", ":", "self", ".", "oops", "(", "\"Sorry, but {} does not exist\"", ".", "format", "(", "file_path", ")", ")", "return", "False", "# abort if the file path is not a file", "if", "not", "os", ".", "path", ".", "isfile", "(", "file_path", ")", ":", "self", ".", "oops", "(", "\"Sorry, but {} is not a file\"", ".", "format", "(", "file_path", ")", ")", "return", "False", "with", "open", "(", "file_path", ")", "as", "handler", ":", "return", "handler", ".", "read", "(", ")" ]
Read the contents of a file. :param file_path: (str) path to a file in the local file system :return: (str) contents of the file, or (False) if not found/not file
[ "Read", "the", "contents", "of", "a", "file", ".", ":", "param", "filename", ":", "(", "str", ")", "path", "to", "a", "file", "in", "the", "local", "file", "system", ":", "return", ":", "(", "str", ")", "contents", "of", "the", "file", "or", "(", "False", ")", "if", "not", "found", "/", "not", "file" ]
train
https://github.com/cuducos/getgist/blob/c70a0a9353eca43360b82c759d1e1514ec265d3b/getgist/local.py#L55-L75
thautwarm/EBNFParser
Python/Ruikowa/ObjectRegex/Tokenizer.py
char_matcher
def char_matcher(mode): """ a faster way for characters to generate token strings cache """ def f_raw(inp_str, pos): return mode if inp_str[pos] is mode else None def f_collection(inp_str, pos): ch = inp_str[pos] for each in mode: if ch is each: return ch return None if isinstance(mode, str): return f_raw if len(mode) is 1: mode = mode[0] return f_raw return f_collection
python
def char_matcher(mode): """ a faster way for characters to generate token strings cache """ def f_raw(inp_str, pos): return mode if inp_str[pos] is mode else None def f_collection(inp_str, pos): ch = inp_str[pos] for each in mode: if ch is each: return ch return None if isinstance(mode, str): return f_raw if len(mode) is 1: mode = mode[0] return f_raw return f_collection
[ "def", "char_matcher", "(", "mode", ")", ":", "def", "f_raw", "(", "inp_str", ",", "pos", ")", ":", "return", "mode", "if", "inp_str", "[", "pos", "]", "is", "mode", "else", "None", "def", "f_collection", "(", "inp_str", ",", "pos", ")", ":", "ch", "=", "inp_str", "[", "pos", "]", "for", "each", "in", "mode", ":", "if", "ch", "is", "each", ":", "return", "ch", "return", "None", "if", "isinstance", "(", "mode", ",", "str", ")", ":", "return", "f_raw", "if", "len", "(", "mode", ")", "is", "1", ":", "mode", "=", "mode", "[", "0", "]", "return", "f_raw", "return", "f_collection" ]
a faster way for characters to generate token strings cache
[ "a", "faster", "way", "for", "characters", "to", "generate", "token", "strings", "cache" ]
train
https://github.com/thautwarm/EBNFParser/blob/101a92c4f408f9e6ce7b55aacb39cded9394521d/Python/Ruikowa/ObjectRegex/Tokenizer.py#L159-L181
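A quick check of char_matcher's two code paths, assuming the function is imported as written. The is comparisons lean on CPython interning single-character strings, which the "faster way" in the docstring suggests is deliberate:

single = char_matcher("a")        # str input takes the f_raw path
assert single("abc", 0) == "a"
assert single("xbc", 0) is None

group = char_matcher(["+", "-"])  # multi-element list takes f_collection
assert group("-1", 0) == "-"
assert group("1", 0) is None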
thautwarm/EBNFParser
Python/Ruikowa/ObjectRegex/Tokenizer.py
str_matcher
def str_matcher(mode): """ generate token strings' cache """ def f_raw(inp_str, pos): return unique_literal_cache_pool[mode] if inp_str.startswith(mode, pos) else None def f_collection(inp_str, pos): for each in mode: if inp_str.startswith(each, pos): return unique_literal_cache_pool[each] return None if isinstance(mode, str): return f_raw if len(mode) is 1: mode = mode[0] return f_raw return f_collection
python
def str_matcher(mode): """ generate token strings' cache """ def f_raw(inp_str, pos): return unique_literal_cache_pool[mode] if inp_str.startswith(mode, pos) else None def f_collection(inp_str, pos): for each in mode: if inp_str.startswith(each, pos): return unique_literal_cache_pool[each] return None if isinstance(mode, str): return f_raw if len(mode) is 1: mode = mode[0] return f_raw return f_collection
[ "def", "str_matcher", "(", "mode", ")", ":", "def", "f_raw", "(", "inp_str", ",", "pos", ")", ":", "return", "unique_literal_cache_pool", "[", "mode", "]", "if", "inp_str", ".", "startswith", "(", "mode", ",", "pos", ")", "else", "None", "def", "f_collection", "(", "inp_str", ",", "pos", ")", ":", "for", "each", "in", "mode", ":", "if", "inp_str", ".", "startswith", "(", "each", ",", "pos", ")", ":", "return", "unique_literal_cache_pool", "[", "each", "]", "return", "None", "if", "isinstance", "(", "mode", ",", "str", ")", ":", "return", "f_raw", "if", "len", "(", "mode", ")", "is", "1", ":", "mode", "=", "mode", "[", "0", "]", "return", "f_raw", "return", "f_collection" ]
generate token strings' cache
[ "generate", "token", "strings", "cache" ]
train
https://github.com/thautwarm/EBNFParser/blob/101a92c4f408f9e6ce7b55aacb39cded9394521d/Python/Ruikowa/ObjectRegex/Tokenizer.py#L184-L205
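str_matcher works the same way for whole literals, returning the shared copy from unique_literal_cache_pool so repeated matches reuse one string object. Note it is prefix matching, not whole-word matching (assuming the module is imported):

m = str_matcher(["def", "del"])
assert m("def f():", 0) == "def"
assert m("delete", 0) == "del"   # startswith match, no word-boundary check
assert m("data", 0) is None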
thautwarm/EBNFParser
Python/Ruikowa/ObjectRegex/Tokenizer.py
regex_matcher
def regex_matcher(regex_pat): """ generate token names' cache :param regex_pat: :return: """ if isinstance(regex_pat, str): regex_pat = re.compile(regex_pat) def f(inp_str, pos): m = regex_pat.match(inp_str, pos) return m.group() if m else None return f
python
def regex_matcher(regex_pat): """ generate token names' cache :param regex_pat: :return: """ if isinstance(regex_pat, str): regex_pat = re.compile(regex_pat) def f(inp_str, pos): m = regex_pat.match(inp_str, pos) return m.group() if m else None return f
[ "def", "regex_matcher", "(", "regex_pat", ")", ":", "if", "isinstance", "(", "regex_pat", ",", "str", ")", ":", "regex_pat", "=", "re", ".", "compile", "(", "regex_pat", ")", "def", "f", "(", "inp_str", ",", "pos", ")", ":", "m", "=", "regex_pat", ".", "match", "(", "inp_str", ",", "pos", ")", "return", "m", ".", "group", "(", ")", "if", "m", "else", "None", "return", "f" ]
generate token names' cache :param regex_pat: :return:
[ "generate", "token", "names", "cache", ":", "param", "regex_pat", ":", ":", "return", ":" ]
train
https://github.com/thautwarm/EBNFParser/blob/101a92c4f408f9e6ce7b55aacb39cded9394521d/Python/Ruikowa/ObjectRegex/Tokenizer.py#L208-L221
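regex_matcher anchors at pos via the compiled pattern's match(inp_str, pos), so it never scans ahead; a small check:

number = regex_matcher(r"\d+")
assert number("x42y", 1) == "42"    # matches exactly at pos 1
assert number("x42y", 0) is None    # match() anchors, it does not search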
thautwarm/EBNFParser
Python/Ruikowa/Bootstrap/Ast.py
Compiler.ast_for_stmts
def ast_for_stmts(self, stmts: T) -> None: """ Stmts ::= TokenDef{0, 1} Equals*; """ if not stmts: raise ValueError('no ast found!') head, *equals = stmts if head.name is NameEnum.TokenDef: self.ast_for_token_def(head) elif head.name is NameEnum.TokenIgnore: self.ast_for_token_ignore(head) else: self.ast_for_equals(head) for each in equals: self.ast_for_equals(each) # if every combined parser can reach any other combined, # just take any of them and compile it! if not self.compile_helper.alone and self._current__combined_parser_name: self.compile_helper.alone.add(self._current__combined_parser_name)
python
def ast_for_stmts(self, stmts: T) -> None: """ Stmts ::= TokenDef{0, 1} Equals*; """ if not stmts: raise ValueError('no ast found!') head, *equals = stmts if head.name is NameEnum.TokenDef: self.ast_for_token_def(head) elif head.name is NameEnum.TokenIgnore: self.ast_for_token_ignore(head) else: self.ast_for_equals(head) for each in equals: self.ast_for_equals(each) # if every combined parser can reach any other combined, # just take any of them and compile it! if not self.compile_helper.alone and self._current__combined_parser_name: self.compile_helper.alone.add(self._current__combined_parser_name)
[ "def", "ast_for_stmts", "(", "self", ",", "stmts", ":", "T", ")", "->", "None", ":", "if", "not", "stmts", ":", "raise", "ValueError", "(", "'no ast found!'", ")", "head", ",", "", "*", "equals", "=", "stmts", "if", "head", ".", "name", "is", "NameEnum", ".", "TokenDef", ":", "self", ".", "ast_for_token_def", "(", "head", ")", "elif", "head", ".", "name", "is", "NameEnum", ".", "TokenIgnore", ":", "self", ".", "ast_for_token_ignore", "(", "head", ")", "else", ":", "self", ".", "ast_for_equals", "(", "head", ")", "for", "each", "in", "equals", ":", "self", ".", "ast_for_equals", "(", "each", ")", "# if every combined parser can reach any other combined, \r", "# just take any of them and compile it!\r", "if", "not", "self", ".", "compile_helper", ".", "alone", "and", "self", ".", "_current__combined_parser_name", ":", "self", ".", "compile_helper", ".", "alone", ".", "add", "(", "self", ".", "_current__combined_parser_name", ")" ]
Stmts ::= TokenDef{0, 1} Equals*;
[ "Stmts", "::", "=", "TokenDef", "{", "0", "1", "}", "Equals", "*", ";" ]
train
https://github.com/thautwarm/EBNFParser/blob/101a92c4f408f9e6ce7b55aacb39cded9394521d/Python/Ruikowa/Bootstrap/Ast.py#L54-L75
andela-sjames/paystack-python
paystackapi/base.py
PayStackRequests._request
def _request(self, method, resource_uri, **kwargs): """Perform a method on a resource. Args: method: requests.`method` resource_uri: resource endpoint Raises: HTTPError Returns: JSON Response """ data = kwargs.get('data') response = method(self.API_BASE_URL + resource_uri, json=data, headers=self.headers) response.raise_for_status() return response.json()
python
def _request(self, method, resource_uri, **kwargs): """Perform a method on a resource. Args: method: requests.`method` resource_uri: resource endpoint Raises: HTTPError Returns: JSON Response """ data = kwargs.get('data') response = method(self.API_BASE_URL + resource_uri, json=data, headers=self.headers) response.raise_for_status() return response.json()
[ "def", "_request", "(", "self", ",", "method", ",", "resource_uri", ",", "*", "*", "kwargs", ")", ":", "data", "=", "kwargs", ".", "get", "(", "'data'", ")", "response", "=", "method", "(", "self", ".", "API_BASE_URL", "+", "resource_uri", ",", "json", "=", "data", ",", "headers", "=", "self", ".", "headers", ")", "response", ".", "raise_for_status", "(", ")", "return", "response", ".", "json", "(", ")" ]
Perform a method on a resource. Args: method: requests.`method` resource_uri: resource endpoint Raises: HTTPError Returns: JSON Response
[ "Perform", "a", "method", "on", "a", "resource", "." ]
train
https://github.com/andela-sjames/paystack-python/blob/c9e4bddcb76e1490fefc362e71a21486400dccd4/paystackapi/base.py#L45-L60
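Because _request calls raise_for_status before decoding, any 4xx/5xx surfaces as requests.exceptions.HTTPError rather than a malformed-JSON failure. A hedged caller sketch; api is assumed to be a configured PayStackRequests instance and the endpoint string is fabricated:

import requests

try:
    result = api._request(requests.get, "transaction/verify/ref123")
except requests.exceptions.HTTPError as err:
    # Error responses land here instead of reaching response.json()
    print("Paystack call failed:", err)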
andela-sjames/paystack-python
paystackapi/base.py
PayStackRequests.get
def get(self, endpoint, **kwargs): """Get a resource. Args: endpoint: resource endpoint. """ return self._request(requests.get, endpoint, **kwargs)
python
def get(self, endpoint, **kwargs): """Get a resource. Args: endpoint: resource endpoint. """ return self._request(requests.get, endpoint, **kwargs)
[ "def", "get", "(", "self", ",", "endpoint", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_request", "(", "requests", ".", "get", ",", "endpoint", ",", "*", "*", "kwargs", ")" ]
Get a resource. Args: endpoint: resource endpoint.
[ "Get", "a", "resource", "." ]
train
https://github.com/andela-sjames/paystack-python/blob/c9e4bddcb76e1490fefc362e71a21486400dccd4/paystackapi/base.py#L62-L68
andela-sjames/paystack-python
paystackapi/base.py
PayStackRequests.post
def post(self, endpoint, **kwargs): """Create a resource. Args: endpoint: resource endpoint. """ return self._request(requests.post, endpoint, **kwargs)
python
def post(self, endpoint, **kwargs): """Create a resource. Args: endpoint: resource endpoint. """ return self._request(requests.post, endpoint, **kwargs)
[ "def", "post", "(", "self", ",", "endpoint", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_request", "(", "requests", ".", "post", ",", "endpoint", ",", "*", "*", "kwargs", ")" ]
Create a resource. Args: endpoint: resource endpoint.
[ "Create", "a", "resource", "." ]
train
https://github.com/andela-sjames/paystack-python/blob/c9e4bddcb76e1490fefc362e71a21486400dccd4/paystackapi/base.py#L70-L76
andela-sjames/paystack-python
paystackapi/base.py
PayStackRequests.put
def put(self, endpoint, **kwargs): """Update a resource. Args: endpoint: resource endpoint. """ return self._request(requests.put, endpoint, **kwargs)
python
def put(self, endpoint, **kwargs): """Update a resource. Args: endpoint: resource endpoint. """ return self._request(requests.put, endpoint, **kwargs)
[ "def", "put", "(", "self", ",", "endpoint", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_request", "(", "requests", ".", "put", ",", "endpoint", ",", "*", "*", "kwargs", ")" ]
Update a resource. Args: endpoint: resource endpoint.
[ "Update", "a", "resource", "." ]
train
https://github.com/andela-sjames/paystack-python/blob/c9e4bddcb76e1490fefc362e71a21486400dccd4/paystackapi/base.py#L78-L84
andela-sjames/paystack-python
paystackapi/customer.py
Customer.update
def update(cls, customer_id, **kwargs):
    """
    Static method defined to update paystack customer data by id.

    Args:
        customer_id: paystack customer id.
        first_name: customer's first name (optional).
        last_name: customer's last name (optional).
        email: customer's email address (optional).
        phone: customer's phone number (optional).

    Returns:
        JSON data from paystack API.
    """
    return cls().requests.put('customer/{customer_id}'.format(**locals()),
                              data=kwargs)
python
def update(cls, customer_id, **kwargs):
    """
    Static method defined to update paystack customer data by id.

    Args:
        customer_id: paystack customer id.
        first_name: customer's first name (optional).
        last_name: customer's last name (optional).
        email: customer's email address (optional).
        phone: customer's phone number (optional).

    Returns:
        JSON data from paystack API.
    """
    return cls().requests.put('customer/{customer_id}'.format(**locals()),
                              data=kwargs)
[ "def", "update", "(", "cls", ",", "customer_id", ",", "*", "*", "kwargs", ")", ":", "return", "cls", "(", ")", ".", "requests", ".", "put", "(", "'customer/{customer_id}'", ".", "format", "(", "*", "*", "locals", "(", ")", ")", ",", "data", "=", "kwargs", ")" ]
Static method defined to update paystack customer data by id. Args: customer_id: paystack customer id. first_name: customer's first name (optional). last_name: customer's last name (optional). email: customer's email address (optional). phone: customer's phone number (optional). Returns: JSON data from paystack API.
[ "Static", "method", "defined", "to", "update", "paystack", "customer", "data", "by", "id", "." ]
train
https://github.com/andela-sjames/paystack-python/blob/c9e4bddcb76e1490fefc362e71a21486400dccd4/paystackapi/customer.py#L50-L65
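A usage sketch matching the documented keyword arguments; the id and field values are fabricated:

response = Customer.update(
    customer_id=4089,
    first_name="Ada",
    phone="+2348012345678",
)
# response is the decoded JSON returned by the Paystack API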
datadesk/slackdown
slackdown/__init__.py
render
def render(txt):
    """
    Accepts Slack formatted text and returns HTML.
    """
    # Removing links to other channels
    txt = re.sub(r'<#[^\|]*\|(.*)>', r'#\g<1>', txt)

    # Removing links to other users
    txt = re.sub(r'<(@.*)>', r'\g<1>', txt)

    # handle named hyperlinks
    txt = re.sub(r'<([^\|]*)\|([^\|]*)>', r'<a href="\g<1>" target="blank">\g<2></a>', txt)

    # handle unnamed hyperlinks
    txt = re.sub(r'<([^a|/a].*)>', r'<a href="\g<1>" target="blank">\g<1></a>', txt)

    # handle ordered and unordered lists
    for delimeter in LIST_DELIMITERS:
        slack_tag = delimeter
        class_name = LIST_DELIMITERS[delimeter]

        # Wrap any lines that start with the slack_tag in <li></li>
        list_regex = u'(?:^|\n){}\s?(.*)'.format(slack_tag)
        list_repl = r'<li class="list-item-{}">\g<1></li>'.format(class_name)
        txt = re.sub(list_regex, list_repl, txt)

    # handle blockquotes
    txt = re.sub(u'(^|\n)(?:&gt;){3}\s?(.*)$', r'\g<1><blockquote>\g<2></blockquote>', txt, flags=re.DOTALL)
    txt = re.sub(u'(?:^|\n)&gt;\s?(.*)\n?', r'<blockquote>\g<1></blockquote>', txt)

    # handle code blocks
    txt = re.sub(r'```\n?(.*)```', r'<pre>\g<1></pre>', txt, flags=re.DOTALL)
    txt = re.sub(r'\n(</pre>)', r'\g<1>', txt)

    # handle bolding, italics, and strikethrough
    for wrapper in FORMATTERS:
        slack_tag = wrapper
        html_tag = FORMATTERS[wrapper]

        # Grab all text in formatted characters on the same line unless escaped
        regex = r'(?<!\\)\{t}([^\{t}|\n]*)\{t}'.format(t=slack_tag)
        repl = r'<{t}>\g<1></{t}>'.format(t=html_tag)
        txt = re.sub(regex, repl, txt)

    # convert line breaks
    txt = txt.replace('\n', '<br />')

    # clean up bad HTML
    parser = CustomSlackdownHTMLParser(txt)
    txt = parser.clean()

    # convert multiple spaces
    txt = txt.replace(r' ', ' &nbsp;')

    return txt
python
def render(txt):
    """
    Accepts Slack formatted text and returns HTML.
    """
    # Removing links to other channels
    txt = re.sub(r'<#[^\|]*\|(.*)>', r'#\g<1>', txt)

    # Removing links to other users
    txt = re.sub(r'<(@.*)>', r'\g<1>', txt)

    # handle named hyperlinks
    txt = re.sub(r'<([^\|]*)\|([^\|]*)>', r'<a href="\g<1>" target="blank">\g<2></a>', txt)

    # handle unnamed hyperlinks
    txt = re.sub(r'<([^a|/a].*)>', r'<a href="\g<1>" target="blank">\g<1></a>', txt)

    # handle ordered and unordered lists
    for delimeter in LIST_DELIMITERS:
        slack_tag = delimeter
        class_name = LIST_DELIMITERS[delimeter]

        # Wrap any lines that start with the slack_tag in <li></li>
        list_regex = u'(?:^|\n){}\s?(.*)'.format(slack_tag)
        list_repl = r'<li class="list-item-{}">\g<1></li>'.format(class_name)
        txt = re.sub(list_regex, list_repl, txt)

    # handle blockquotes
    txt = re.sub(u'(^|\n)(?:&gt;){3}\s?(.*)$', r'\g<1><blockquote>\g<2></blockquote>', txt, flags=re.DOTALL)
    txt = re.sub(u'(?:^|\n)&gt;\s?(.*)\n?', r'<blockquote>\g<1></blockquote>', txt)

    # handle code blocks
    txt = re.sub(r'```\n?(.*)```', r'<pre>\g<1></pre>', txt, flags=re.DOTALL)
    txt = re.sub(r'\n(</pre>)', r'\g<1>', txt)

    # handle bolding, italics, and strikethrough
    for wrapper in FORMATTERS:
        slack_tag = wrapper
        html_tag = FORMATTERS[wrapper]

        # Grab all text in formatted characters on the same line unless escaped
        regex = r'(?<!\\)\{t}([^\{t}|\n]*)\{t}'.format(t=slack_tag)
        repl = r'<{t}>\g<1></{t}>'.format(t=html_tag)
        txt = re.sub(regex, repl, txt)

    # convert line breaks
    txt = txt.replace('\n', '<br />')

    # clean up bad HTML
    parser = CustomSlackdownHTMLParser(txt)
    txt = parser.clean()

    # convert multiple spaces
    txt = txt.replace(r' ', ' &nbsp;')

    return txt
[ "def", "render", "(", "txt", ")", ":", "# Removing links to other channels", "txt", "=", "re", ".", "sub", "(", "r'<#[^\\|]*\\|(.*)>'", ",", "r'#\\g<1>'", ",", "txt", ")", "# Removing links to other users", "txt", "=", "re", ".", "sub", "(", "r'<(@.*)>'", ",", "r'\\g<1>'", ",", "txt", ")", "# handle named hyperlinks", "txt", "=", "re", ".", "sub", "(", "r'<([^\\|]*)\\|([^\\|]*)>'", ",", "r'<a href=\"\\g<1>\" target=\"blank\">\\g<2></a>'", ",", "txt", ")", "# handle unnamed hyperlinks", "txt", "=", "re", ".", "sub", "(", "r'<([^a|/a].*)>'", ",", "r'<a href=\"\\g<1>\" target=\"blank\">\\g<1></a>'", ",", "txt", ")", "# handle ordered and unordered lists", "for", "delimeter", "in", "LIST_DELIMITERS", ":", "slack_tag", "=", "delimeter", "class_name", "=", "LIST_DELIMITERS", "[", "delimeter", "]", "# Wrap any lines that start with the slack_tag in <li></li>", "list_regex", "=", "u'(?:^|\\n){}\\s?(.*)'", ".", "format", "(", "slack_tag", ")", "list_repl", "=", "r'<li class=\"list-item-{}\">\\g<1></li>'", ".", "format", "(", "class_name", ")", "txt", "=", "re", ".", "sub", "(", "list_regex", ",", "list_repl", ",", "txt", ")", "# hanlde blockquotes", "txt", "=", "re", ".", "sub", "(", "u'(^|\\n)(?:&gt;){3}\\s?(.*)$'", ",", "r'\\g<1><blockquote>\\g<2></blockquote>'", ",", "txt", ",", "flags", "=", "re", ".", "DOTALL", ")", "txt", "=", "re", ".", "sub", "(", "u'(?:^|\\n)&gt;\\s?(.*)\\n?'", ",", "r'<blockquote>\\g<1></blockquote>'", ",", "txt", ")", "# handle code blocks", "txt", "=", "re", ".", "sub", "(", "r'```\\n?(.*)```'", ",", "r'<pre>\\g<1></pre>'", ",", "txt", ",", "flags", "=", "re", ".", "DOTALL", ")", "txt", "=", "re", ".", "sub", "(", "r'\\n(</pre>)'", ",", "r'\\g<1>'", ",", "txt", ")", "# handle bolding, italics, and strikethrough", "for", "wrapper", "in", "FORMATTERS", ":", "slack_tag", "=", "wrapper", "html_tag", "=", "FORMATTERS", "[", "wrapper", "]", "# Grab all text in formatted characters on the same line unless escaped", "regex", "=", "r'(?<!\\\\)\\{t}([^\\{t}|\\n]*)\\{t}'", ".", "format", "(", "t", "=", "slack_tag", ")", "repl", "=", "r'<{t}>\\g<1></{t}>'", ".", "format", "(", "t", "=", "html_tag", ")", "txt", "=", "re", ".", "sub", "(", "regex", ",", "repl", ",", "txt", ")", "# convert line breaks", "txt", "=", "txt", ".", "replace", "(", "'\\n'", ",", "'<br />'", ")", "# clean up bad HTML", "parser", "=", "CustomSlackdownHTMLParser", "(", "txt", ")", "txt", "=", "parser", ".", "clean", "(", ")", "# convert multiple spaces", "txt", "=", "txt", ".", "replace", "(", "r' '", ",", "' &nbsp'", ")", "return", "txt" ]
Accepts Slack formatted text and returns HTML.
[ "Accepts", "Slack", "formatted", "text", "and", "returns", "HTML", "." ]
train
https://github.com/datadesk/slackdown/blob/2c5c2faf2673d0d58183f590f234d2c7e1fe8508/slackdown/__init__.py#L59-L114
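End to end, render turns Slack markup into paragraph-wrapped HTML. The expected output below assumes FORMATTERS maps * to b, which the surrounding slackdown constants suggest but these records do not show:

html = render("*important* visit <https://example.com|our site>")
# -> '<p><b>important</b> visit '
#    '<a href="https://example.com" target="blank">our site</a></p>'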
datadesk/slackdown
slackdown/__init__.py
CustomSlackdownHTMLParser._open_list
def _open_list(self, list_type):
    """
    Add an opening list tag corresponding to the specification
    in the parser's LIST_TYPES.
    """
    if list_type in LIST_TYPES.keys():
        tag = LIST_TYPES[list_type]
    else:
        raise Exception('CustomSlackdownHTMLParser:_open_list: Not a valid list type.')
    html = '<{t} class="list-container-{c}">'.format(
        t=tag,
        c=list_type
    )
    self.cleaned_html += html
    self.current_parent_element['tag'] = LIST_TYPES[list_type]
    self.current_parent_element['attrs'] = {'class': list_type}
python
def _open_list(self, list_type):
    """
    Add an opening list tag corresponding to the specification
    in the parser's LIST_TYPES.
    """
    if list_type in LIST_TYPES.keys():
        tag = LIST_TYPES[list_type]
    else:
        raise Exception('CustomSlackdownHTMLParser:_open_list: Not a valid list type.')
    html = '<{t} class="list-container-{c}">'.format(
        t=tag,
        c=list_type
    )
    self.cleaned_html += html
    self.current_parent_element['tag'] = LIST_TYPES[list_type]
    self.current_parent_element['attrs'] = {'class': list_type}
[ "def", "_open_list", "(", "self", ",", "list_type", ")", ":", "if", "list_type", "in", "LIST_TYPES", ".", "keys", "(", ")", ":", "tag", "=", "LIST_TYPES", "[", "list_type", "]", "else", ":", "raise", "Exception", "(", "'CustomSlackdownHTMLParser:_open_list: Not a valid list type.'", ")", "html", "=", "'<{t} class=\"list-container-{c}\">'", ".", "format", "(", "t", "=", "tag", ",", "c", "=", "list_type", ")", "self", ".", "cleaned_html", "+=", "html", "self", ".", "current_parent_element", "[", "'tag'", "]", "=", "LIST_TYPES", "[", "list_type", "]", "self", ".", "current_parent_element", "[", "'attrs'", "]", "=", "{", "'class'", ":", "list_type", "}" ]
Add an opening list tag corresponding to the specification in the parser's LIST_TYPES.
[ "Add", "an", "open", "list", "tag", "corresponding", "to", "the", "specification", "in", "the", "parser", "s", "LIST_TYPES", "." ]
train
https://github.com/datadesk/slackdown/blob/2c5c2faf2673d0d58183f590f234d2c7e1fe8508/slackdown/__init__.py#L134-L150
datadesk/slackdown
slackdown/__init__.py
CustomSlackdownHTMLParser._close_list
def _close_list(self):
    """
    Add a closing list tag corresponding to the currently open
    list found in current_parent_element.
    """
    list_type = self.current_parent_element['attrs']['class']
    tag = LIST_TYPES[list_type]
    html = '</{t}>'.format(
        t=tag
    )
    self.cleaned_html += html
    self.current_parent_element['tag'] = ''
    self.current_parent_element['attrs'] = {}
python
def _close_list(self):
    """
    Add a closing list tag corresponding to the currently open
    list found in current_parent_element.
    """
    list_type = self.current_parent_element['attrs']['class']
    tag = LIST_TYPES[list_type]
    html = '</{t}>'.format(
        t=tag
    )
    self.cleaned_html += html
    self.current_parent_element['tag'] = ''
    self.current_parent_element['attrs'] = {}
[ "def", "_close_list", "(", "self", ")", ":", "list_type", "=", "self", ".", "current_parent_element", "[", "'attrs'", "]", "[", "'class'", "]", "tag", "=", "LIST_TYPES", "[", "list_type", "]", "html", "=", "'</{t}>'", ".", "format", "(", "t", "=", "tag", ")", "self", ".", "cleaned_html", "+=", "html", "self", ".", "current_parent_element", "[", "'tag'", "]", "=", "''", "self", ".", "current_parent_element", "[", "'attrs'", "]", "=", "{", "}" ]
Add a closing list tag corresponding to the currently open list found in current_parent_element.
[ "Add", "an", "close", "list", "tag", "corresponding", "to", "the", "currently", "open", "list", "found", "in", "current_parent_element", "." ]
train
https://github.com/datadesk/slackdown/blob/2c5c2faf2673d0d58183f590f234d2c7e1fe8508/slackdown/__init__.py#L152-L165
datadesk/slackdown
slackdown/__init__.py
CustomSlackdownHTMLParser.handle_starttag
def handle_starttag(self, tag, attrs): """ Called by HTMLParser.feed when a start tag is found. """ # Parse the tag attributes attrs_dict = dict(t for t in attrs) # If the tag is a predefined parent element if tag in PARENT_ELEMENTS: # If parser is parsing another parent element if self.current_parent_element['tag'] != '': # close the parent element self.cleaned_html += '</{}>'.format(self.current_parent_element['tag']) self.current_parent_element['tag'] = tag self.current_parent_element['attrs'] = {} self.cleaned_html += '<{}>'.format(tag) # If the tag is a list item elif tag == 'li': self.parsing_li = True # Parse the class name & subsequent type class_name = attrs_dict['class'] list_type = class_name[10:] # Check if parsing a list if self.current_parent_element['tag'] == 'ul' or self.current_parent_element['tag'] == 'ol': cur_list_type = self.current_parent_element['attrs']['class'] # Parsing a different list if cur_list_type != list_type: # Close that list self._close_list() # Open new list self._open_list(list_type) # Not parsing a list else: # if parsing some other parent if self.current_parent_element['tag'] != '': self.cleaned_html += '</{}>'.format(self.current_parent_element['tag']) # Open new list self._open_list(list_type) self.cleaned_html += '<{}>'.format(tag) # If the tag is a line break elif tag == 'br': # If parsing a paragraph, close it if self.current_parent_element['tag'] == 'p': self.cleaned_html += '</p>' self.current_parent_element['tag'] = '' self.current_parent_element['attrs'] = {} # If parsing a list, close it elif self.current_parent_element['tag'] == 'ul' or self.current_parent_element['tag'] == 'ol': self._close_list() # If parsing any other parent element, keep it elif self.current_parent_element['tag'] in PARENT_ELEMENTS: self.cleaned_html += '<br />' # If not in any parent element, create an empty paragraph else: self.cleaned_html += '<p></p>' # If the tag is something else, like a <b> or <i> tag else: # If not parsing any parent element if self.current_parent_element['tag'] == '': self.cleaned_html += '<p>' self.current_parent_element['tag'] = 'p' self.cleaned_html += '<{}'.format(tag) for attr in sorted(attrs_dict.keys()): self.cleaned_html += ' {k}="{v}"'.format( k=attr, v=attrs_dict[attr] ) self.cleaned_html += '>'
python
def handle_starttag(self, tag, attrs): """ Called by HTMLParser.feed when a start tag is found. """ # Parse the tag attributes attrs_dict = dict(t for t in attrs) # If the tag is a predefined parent element if tag in PARENT_ELEMENTS: # If parser is parsing another parent element if self.current_parent_element['tag'] != '': # close the parent element self.cleaned_html += '</{}>'.format(self.current_parent_element['tag']) self.current_parent_element['tag'] = tag self.current_parent_element['attrs'] = {} self.cleaned_html += '<{}>'.format(tag) # If the tag is a list item elif tag == 'li': self.parsing_li = True # Parse the class name & subsequent type class_name = attrs_dict['class'] list_type = class_name[10:] # Check if parsing a list if self.current_parent_element['tag'] == 'ul' or self.current_parent_element['tag'] == 'ol': cur_list_type = self.current_parent_element['attrs']['class'] # Parsing a different list if cur_list_type != list_type: # Close that list self._close_list() # Open new list self._open_list(list_type) # Not parsing a list else: # if parsing some other parent if self.current_parent_element['tag'] != '': self.cleaned_html += '</{}>'.format(self.current_parent_element['tag']) # Open new list self._open_list(list_type) self.cleaned_html += '<{}>'.format(tag) # If the tag is a line break elif tag == 'br': # If parsing a paragraph, close it if self.current_parent_element['tag'] == 'p': self.cleaned_html += '</p>' self.current_parent_element['tag'] = '' self.current_parent_element['attrs'] = {} # If parsing a list, close it elif self.current_parent_element['tag'] == 'ul' or self.current_parent_element['tag'] == 'ol': self._close_list() # If parsing any other parent element, keep it elif self.current_parent_element['tag'] in PARENT_ELEMENTS: self.cleaned_html += '<br />' # If not in any parent element, create an empty paragraph else: self.cleaned_html += '<p></p>' # If the tag is something else, like a <b> or <i> tag else: # If not parsing any parent element if self.current_parent_element['tag'] == '': self.cleaned_html += '<p>' self.current_parent_element['tag'] = 'p' self.cleaned_html += '<{}'.format(tag) for attr in sorted(attrs_dict.keys()): self.cleaned_html += ' {k}="{v}"'.format( k=attr, v=attrs_dict[attr] ) self.cleaned_html += '>'
[ "def", "handle_starttag", "(", "self", ",", "tag", ",", "attrs", ")", ":", "# Parse the tag attributes", "attrs_dict", "=", "dict", "(", "t", "for", "t", "in", "attrs", ")", "# If the tag is a predefined parent element", "if", "tag", "in", "PARENT_ELEMENTS", ":", "# If parser is parsing another parent element", "if", "self", ".", "current_parent_element", "[", "'tag'", "]", "!=", "''", ":", "# close the parent element", "self", ".", "cleaned_html", "+=", "'</{}>'", ".", "format", "(", "self", ".", "current_parent_element", "[", "'tag'", "]", ")", "self", ".", "current_parent_element", "[", "'tag'", "]", "=", "tag", "self", ".", "current_parent_element", "[", "'attrs'", "]", "=", "{", "}", "self", ".", "cleaned_html", "+=", "'<{}>'", ".", "format", "(", "tag", ")", "# If the tag is a list item", "elif", "tag", "==", "'li'", ":", "self", ".", "parsing_li", "=", "True", "# Parse the class name & subsequent type", "class_name", "=", "attrs_dict", "[", "'class'", "]", "list_type", "=", "class_name", "[", "10", ":", "]", "# Check if parsing a list", "if", "self", ".", "current_parent_element", "[", "'tag'", "]", "==", "'ul'", "or", "self", ".", "current_parent_element", "[", "'tag'", "]", "==", "'ol'", ":", "cur_list_type", "=", "self", ".", "current_parent_element", "[", "'attrs'", "]", "[", "'class'", "]", "# Parsing a different list", "if", "cur_list_type", "!=", "list_type", ":", "# Close that list", "self", ".", "_close_list", "(", ")", "# Open new list", "self", ".", "_open_list", "(", "list_type", ")", "# Not parsing a list", "else", ":", "# if parsing some other parent", "if", "self", ".", "current_parent_element", "[", "'tag'", "]", "!=", "''", ":", "self", ".", "cleaned_html", "+=", "'</{}>'", ".", "format", "(", "self", ".", "current_parent_element", "[", "'tag'", "]", ")", "# Open new list", "self", ".", "_open_list", "(", "list_type", ")", "self", ".", "cleaned_html", "+=", "'<{}>'", ".", "format", "(", "tag", ")", "# If the tag is a line break", "elif", "tag", "==", "'br'", ":", "# If parsing a paragraph, close it", "if", "self", ".", "current_parent_element", "[", "'tag'", "]", "==", "'p'", ":", "self", ".", "cleaned_html", "+=", "'</p>'", "self", ".", "current_parent_element", "[", "'tag'", "]", "=", "''", "self", ".", "current_parent_element", "[", "'attrs'", "]", "=", "{", "}", "# If parsing a list, close it", "elif", "self", ".", "current_parent_element", "[", "'tag'", "]", "==", "'ul'", "or", "self", ".", "current_parent_element", "[", "'tag'", "]", "==", "'ol'", ":", "self", ".", "_close_list", "(", ")", "# If parsing any other parent element, keep it", "elif", "self", ".", "current_parent_element", "[", "'tag'", "]", "in", "PARENT_ELEMENTS", ":", "self", ".", "cleaned_html", "+=", "'<br />'", "# If not in any parent element, create an empty paragraph", "else", ":", "self", ".", "cleaned_html", "+=", "'<p></p>'", "# If the tag is something else, like a <b> or <i> tag", "else", ":", "# If not parsing any parent element", "if", "self", ".", "current_parent_element", "[", "'tag'", "]", "==", "''", ":", "self", ".", "cleaned_html", "+=", "'<p>'", "self", ".", "current_parent_element", "[", "'tag'", "]", "=", "'p'", "self", ".", "cleaned_html", "+=", "'<{}'", ".", "format", "(", "tag", ")", "for", "attr", "in", "sorted", "(", "attrs_dict", ".", "keys", "(", ")", ")", ":", "self", ".", "cleaned_html", "+=", "' {k}=\"{v}\"'", ".", "format", "(", "k", "=", "attr", ",", "v", "=", "attrs_dict", "[", "attr", "]", ")", "self", ".", "cleaned_html", "+=", "'>'" ]
Called by HTMLParser.feed when a start tag is found.
[ "Called", "by", "HTMLParser", ".", "feed", "when", "a", "start", "tag", "is", "found", "." ]
train
https://github.com/datadesk/slackdown/blob/2c5c2faf2673d0d58183f590f234d2c7e1fe8508/slackdown/__init__.py#L167-L245
datadesk/slackdown
slackdown/__init__.py
CustomSlackdownHTMLParser.handle_endtag
def handle_endtag(self, tag):
    """
    Called by HTMLParser.feed when an end tag is found.
    """
    if tag in PARENT_ELEMENTS:
        self.current_parent_element['tag'] = ''
        self.current_parent_element['attrs'] = ''
    if tag == 'li':
        self.parsing_li = False
    if tag != 'br':
        self.cleaned_html += '</{}>'.format(tag)
python
def handle_endtag(self, tag):
    """
    Called by HTMLParser.feed when an end tag is found.
    """
    if tag in PARENT_ELEMENTS:
        self.current_parent_element['tag'] = ''
        self.current_parent_element['attrs'] = ''
    if tag == 'li':
        self.parsing_li = False
    if tag != 'br':
        self.cleaned_html += '</{}>'.format(tag)
[ "def", "handle_endtag", "(", "self", ",", "tag", ")", ":", "if", "tag", "in", "PARENT_ELEMENTS", ":", "self", ".", "current_parent_element", "[", "'tag'", "]", "=", "''", "self", ".", "current_parent_element", "[", "'attrs'", "]", "=", "''", "if", "tag", "==", "'li'", ":", "self", ".", "parsing_li", "=", "True", "if", "tag", "!=", "'br'", ":", "self", ".", "cleaned_html", "+=", "'</{}>'", ".", "format", "(", "tag", ")" ]
Called by HTMLParser.feed when an end tag is found.
[ "Called", "by", "HTMLParser", ".", "feed", "when", "an", "end", "tag", "is", "found", "." ]
train
https://github.com/datadesk/slackdown/blob/2c5c2faf2673d0d58183f590f234d2c7e1fe8508/slackdown/__init__.py#L247-L258
datadesk/slackdown
slackdown/__init__.py
CustomSlackdownHTMLParser.handle_data
def handle_data(self, data): """ Called by HTMLParser.feed when text is found. """ if self.current_parent_element['tag'] == '': self.cleaned_html += '<p>' self.current_parent_element['tag'] = 'p' self.cleaned_html += data
python
def handle_data(self, data): """ Called by HTMLParser.feed when text is found. """ if self.current_parent_element['tag'] == '': self.cleaned_html += '<p>' self.current_parent_element['tag'] = 'p' self.cleaned_html += data
[ "def", "handle_data", "(", "self", ",", "data", ")", ":", "if", "self", ".", "current_parent_element", "[", "'tag'", "]", "==", "''", ":", "self", ".", "cleaned_html", "+=", "'<p>'", "self", ".", "current_parent_element", "[", "'tag'", "]", "=", "'p'", "self", ".", "cleaned_html", "+=", "data" ]
Called by HTMLParser.feed when text is found.
[ "Called", "by", "HTMLParser", ".", "feed", "when", "text", "is", "found", "." ]
train
https://github.com/datadesk/slackdown/blob/2c5c2faf2673d0d58183f590f234d2c7e1fe8508/slackdown/__init__.py#L260-L268
datadesk/slackdown
slackdown/__init__.py
CustomSlackdownHTMLParser._remove_pre_formatting
def _remove_pre_formatting(self): """ Removes formatting tags added to pre elements. """ preformatted_wrappers = [ 'pre', 'code' ] for wrapper in preformatted_wrappers: for formatter in FORMATTERS: tag = FORMATTERS[formatter] character = formatter regex = r'(<{w}>.*)<{t}>(.*)</{t}>(.*</{w}>)'.format( t=tag, w=wrapper ) repl = r'\g<1>{c}\g<2>{c}\g<3>'.format(c=character) self.cleaned_html = re.sub(regex, repl, self.cleaned_html)
python
def _remove_pre_formatting(self): """ Removes formatting tags added to pre elements. """ preformatted_wrappers = [ 'pre', 'code' ] for wrapper in preformatted_wrappers: for formatter in FORMATTERS: tag = FORMATTERS[formatter] character = formatter regex = r'(<{w}>.*)<{t}>(.*)</{t}>(.*</{w}>)'.format( t=tag, w=wrapper ) repl = r'\g<1>{c}\g<2>{c}\g<3>'.format(c=character) self.cleaned_html = re.sub(regex, repl, self.cleaned_html)
[ "def", "_remove_pre_formatting", "(", "self", ")", ":", "preformatted_wrappers", "=", "[", "'pre'", ",", "'code'", "]", "for", "wrapper", "in", "preformatted_wrappers", ":", "for", "formatter", "in", "FORMATTERS", ":", "tag", "=", "FORMATTERS", "[", "formatter", "]", "character", "=", "formatter", "regex", "=", "r'(<{w}>.*)<{t}>(.*)</{t}>(.*</{w}>)'", ".", "format", "(", "t", "=", "tag", ",", "w", "=", "wrapper", ")", "repl", "=", "r'\\g<1>{c}\\g<2>{c}\\g<3>'", ".", "format", "(", "c", "=", "character", ")", "self", ".", "cleaned_html", "=", "re", ".", "sub", "(", "regex", ",", "repl", ",", "self", ".", "cleaned_html", ")" ]
Removes formatting tags added to pre elements.
[ "Removes", "formatting", "tags", "added", "to", "pre", "elements", "." ]
train
https://github.com/datadesk/slackdown/blob/2c5c2faf2673d0d58183f590f234d2c7e1fe8508/slackdown/__init__.py#L270-L289
datadesk/slackdown
slackdown/__init__.py
CustomSlackdownHTMLParser.clean
def clean(self): """ Goes through the txt input and cleans up any problematic HTML. """ # Calls handle_starttag, handle_endtag, and handle_data self.feed() # Clean up any parent tags left open if self.current_parent_element['tag'] != '': self.cleaned_html += '</{}>'.format(self.current_parent_element['tag']) # Remove empty <p> added after lists self.cleaned_html = re.sub(r'(</[u|o]l>)<p></p>', r'\g<1>', self.cleaned_html) self._remove_pre_formatting() return self.cleaned_html
python
def clean(self): """ Goes through the txt input and cleans up any problematic HTML. """ # Calls handle_starttag, handle_endtag, and handle_data self.feed() # Clean up any parent tags left open if self.current_parent_element['tag'] != '': self.cleaned_html += '</{}>'.format(self.current_parent_element['tag']) # Remove empty <p> added after lists self.cleaned_html = re.sub(r'(</[u|o]l>)<p></p>', r'\g<1>', self.cleaned_html) self._remove_pre_formatting() return self.cleaned_html
[ "def", "clean", "(", "self", ")", ":", "# Calls handle_starttag, handle_endtag, and handle_data", "self", ".", "feed", "(", ")", "# Clean up any parent tags left open", "if", "self", ".", "current_parent_element", "[", "'tag'", "]", "!=", "''", ":", "self", ".", "cleaned_html", "+=", "'</{}>'", ".", "format", "(", "self", ".", "current_parent_element", "[", "'tag'", "]", ")", "# Remove empty <p> added after lists", "self", ".", "cleaned_html", "=", "re", ".", "sub", "(", "r'(</[u|o]l>)<p></p>'", ",", "r'\\g<1>'", ",", "self", ".", "cleaned_html", ")", "self", ".", "_remove_pre_formatting", "(", ")", "return", "self", ".", "cleaned_html" ]
Goes through the txt input and cleans up any problematic HTML.
[ "Goes", "through", "the", "txt", "input", "and", "cleans", "up", "any", "problematic", "HTML", "." ]
train
https://github.com/datadesk/slackdown/blob/2c5c2faf2673d0d58183f590f234d2c7e1fe8508/slackdown/__init__.py#L297-L313
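clean is what balances the tag soup render produces: feed the parser, close any dangling parent element, strip the empty <p> left after lists, and undo formatting inside pre blocks. A small illustration, assuming LIST_TYPES maps bullet to ul as the list-container markup in _open_list suggests:

parser = CustomSlackdownHTMLParser('<li class="list-item-bullet">item</li>')
html = parser.clean()
# _open_list wraps the stray item and clean() closes the container it left open:
# '<ul class="list-container-bullet"><li>item</li></ul>'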
datadesk/slackdown
slackdown/templates.py
render_author
def render_author(**kwargs):
    """
    Unstrict template block for rendering authors:

    <div class="user">
        <img class="user-avatar" src="{author_avatar}">
        <p class="user-name">
            <a href="{author_link}">{author_name}</a>
        </p>
        <p class="user-handle">{author_handle}</p>
    </div>
    """
    html = '<div class="user">'

    author_avatar = kwargs.get('author_avatar', None)
    if author_avatar:
        html += '<img class="user-avatar" src="{}">'.format(author_avatar)

    author_name = kwargs.get('author_name', None)
    if author_name:
        html += '<p class="user-name">'
        author_link = kwargs.get('author_link', None)
        if author_link:
            html += '<a href="{author_link}">{author_name}</a>'.format(
                author_link=author_link,
                author_name=author_name
            )
        else:
            html += author_name
        html += '</p>'

    author_handle = kwargs.get('author_handle', None)
    if author_handle:
        html += '<p class="user-handle">{}</p>'.format(author_handle)

    html += '</div>'
    return html
python
def render_author(**kwargs):
    """
    Unstrict template block for rendering authors:

    <div class="user">
        <img class="user-avatar" src="{author_avatar}">
        <p class="user-name">
            <a href="{author_link}">{author_name}</a>
        </p>
        <p class="user-handle">{author_handle}</p>
    </div>
    """
    html = '<div class="user">'

    author_avatar = kwargs.get('author_avatar', None)
    if author_avatar:
        html += '<img class="user-avatar" src="{}">'.format(author_avatar)

    author_name = kwargs.get('author_name', None)
    if author_name:
        html += '<p class="user-name">'
        author_link = kwargs.get('author_link', None)
        if author_link:
            html += '<a href="{author_link}">{author_name}</a>'.format(
                author_link=author_link,
                author_name=author_name
            )
        else:
            html += author_name
        html += '</p>'

    author_handle = kwargs.get('author_handle', None)
    if author_handle:
        html += '<p class="user-handle">{}</p>'.format(author_handle)

    html += '</div>'
    return html
[ "def", "render_author", "(", "*", "*", "kwargs", ")", ":", "html", "=", "'<div class=\"user\">'", "author_avatar", "=", "kwargs", ".", "get", "(", "'author_avatar'", ",", "None", ")", "if", "author_avatar", ":", "html", "+=", "'<img class=\"user-avatar\" src=\"{}\">'", ".", "format", "(", "author_avatar", ")", "author_name", "=", "kwargs", ".", "get", "(", "'author_name'", ",", "None", ")", "if", "author_name", ":", "html", "+=", "'<p class=\"user-name\">'", "author_link", "=", "kwargs", ".", "get", "(", "'author_link'", ",", "None", ")", "if", "author_link", ":", "html", "+=", "'<a href=\"{author_link}\">{author_name}</a>'", ".", "format", "(", "author_link", "=", "author_link", ",", "author_name", "=", "author_name", ")", "else", ":", "html", "+=", "author_name", "html", "+=", "'</p>'", "author_handle", "=", "kwargs", ".", "get", "(", "'author_handle'", ",", "None", ")", "if", "author_handle", ":", "html", "+=", "'<p class=\"user-handle\">{}</p>'", ".", "format", "(", "author_handle", ")", "html", "+=", "'</div>'" ]
Unstrict template block for rendering authors: <div class="user"> <img class="user-avatar" src="{author_avatar}"> <p class="user-name"> <a href="{author_link}">{author_name}</a> </p> <p class="user-handle">{author_handle}</p> </div>
[ "Unstrict", "template", "block", "for", "rendering", "authors", ":", "<div", "class", "=", "author", ">", "<img", "class", "=", "author", "-", "avatar", "src", "=", "{", "author_avatar", "}", ">", "<p", "class", "=", "author", "-", "name", ">", "<a", "href", "=", "{", "author_link", "}", ">", "{", "author_name", "}", "<", "/", "a", ">", "<", "/", "p", ">", "<p", "class", "=", "user", "-", "handle", ">", "{", "author_handle", "}", "<", "/", "p", ">", "<", "/", "div", ">" ]
train
https://github.com/datadesk/slackdown/blob/2c5c2faf2673d0d58183f590f234d2c7e1fe8508/slackdown/templates.py#L1-L37
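With the missing return restored above, a fabricated call shows that only the keys actually passed appear in the markup:

html = render_author(author_name="Ada Lovelace", author_handle="@ada")
# '<div class="user"><p class="user-name">Ada Lovelace</p>'
# '<p class="user-handle">@ada</p></div>'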
datadesk/slackdown
slackdown/templates.py
render_metadata
def render_metadata(**kwargs):
    """
    Unstrict template block for rendering metadata:

    <div class="metadata">
        <img class="metadata-logo" src="{service_logo}">
        <p class="metadata-name">{service_name}</p>
        <p class="metadata-timestamp">
            <a href="{timestamp_link}">{timestamp}</a>
        </p>
    </div>
    """
    html = '<div class="metadata">'

    service_logo = kwargs.get('service_logo', None)
    if service_logo:
        html += '<img class="metadata-logo" src="{}">'.format(service_logo)

    service_name = kwargs.get('service_name', None)
    if service_name:
        html += '<p class="metadata-name">{}</p>'.format(service_name)

    timestamp = kwargs.get('timestamp', None)
    if timestamp:
        html += '<p class="metadata-timestamp">'
        timestamp_link = kwargs.get('timestamp_link', None)
        if timestamp_link:
            html += '<a href="{timestamp_link}">{timestamp}</a>'.format(
                timestamp_link=timestamp_link,
                timestamp=timestamp
            )
        else:
            html += timestamp
        html += '</p>'

    html += '</div>'
    return html
python
def render_metadata(**kwargs):
    """
    Unstrict template block for rendering metadata:

    <div class="metadata">
        <img class="metadata-logo" src="{service_logo}">
        <p class="metadata-name">{service_name}</p>
        <p class="metadata-timestamp">
            <a href="{timestamp_link}">{timestamp}</a>
        </p>
    </div>
    """
    html = '<div class="metadata">'

    service_logo = kwargs.get('service_logo', None)
    if service_logo:
        html += '<img class="metadata-logo" src="{}">'.format(service_logo)

    service_name = kwargs.get('service_name', None)
    if service_name:
        html += '<p class="metadata-name">{}</p>'.format(service_name)

    timestamp = kwargs.get('timestamp', None)
    if timestamp:
        html += '<p class="metadata-timestamp">'
        timestamp_link = kwargs.get('timestamp_link', None)
        if timestamp_link:
            html += '<a href="{timestamp_link}">{timestamp}</a>'.format(
                timestamp_link=timestamp_link,
                timestamp=timestamp
            )
        else:
            html += timestamp
        html += '</p>'

    html += '</div>'
    return html
[ "def", "render_metadata", "(", "*", "*", "kwargs", ")", ":", "html", "=", "'<div class=\"metadata\">'", "service_logo", "=", "kwargs", ".", "get", "(", "'service_logo'", ",", "None", ")", "if", "service_logo", ":", "html", "+=", "'<img class=\"metadata-logo\" src=\"{}\">'", ".", "format", "(", "service_logo", ")", "service_name", "=", "kwargs", ".", "get", "(", "'service_name'", ",", "None", ")", "if", "service_name", ":", "html", "+=", "'<p class=\"metadata-name\">{}</p>'", ".", "format", "(", "service_name", ")", "timestamp", "=", "kwargs", ".", "get", "(", "'timestamp'", ",", "None", ")", "if", "timestamp", ":", "html", "+=", "'<p class=\"user-name\">'", "timestamp_link", "=", "kwargs", ".", "get", "(", "'timestamp_link'", ",", "None", ")", "if", "timestamp_link", ":", "html", "+=", "'<a href=\"{timestamp_link}\">{timestamp}</a>'", ".", "format", "(", "timestamp_link", "=", "timestamp_link", ",", "timestamp", "=", "timestamp", ")", "else", ":", "html", "+=", "timestamp", "html", "+=", "'</p>'", "html", "+=", "'</div>'" ]
Unstrict template block for rendering metadata: <div class="metadata"> <img class="metadata-logo" src="{service_logo}"> <p class="metadata-name">{service_name}</p> <p class="metadata-timestamp"> <a href="{timestamp_link}">{timestamp}</a> </p> </div>
[ "Unstrict", "template", "block", "for", "rendering", "metadata", ":", "<div", "class", "=", "metadata", ">", "<img", "class", "=", "metadata", "-", "logo", "src", "=", "{", "service_logo", "}", ">", "<p", "class", "=", "metadata", "-", "name", ">", "{", "service_name", "}", "<", "/", "p", ">", "<p", "class", "=", "metadata", "-", "timestamp", ">", "<a", "href", "=", "{", "timestamp_link", "}", ">", "{", "timestamp", "}", "<", "/", "a", ">", "<", "/", "p", ">", "<", "/", "div", ">" ]
train
https://github.com/datadesk/slackdown/blob/2c5c2faf2673d0d58183f590f234d2c7e1fe8508/slackdown/templates.py#L40-L76
datadesk/slackdown
slackdown/templates.py
render_image
def render_image(**kwargs): """ Unstrict template block for rendering an image: <img alt="{alt_text}" title="{title}" src="{url}"> """ html = '' url = kwargs.get('url', None) if url: html = '<img' alt_text = kwargs.get('alt_text', None) if alt_text: html += ' alt="{}"'.format(alt_text) title = kwargs.get('title', None) if title: html += ' title="{}"'.format(title) html += ' src="{}">'.format(url) return html
python
def render_image(**kwargs): """ Unstrict template block for rendering an image: <img alt="{alt_text}" title="{title}" src="{url}"> """ html = '' url = kwargs.get('url', None) if url: html = '<img' alt_text = kwargs.get('alt_text', None) if alt_text: html += ' alt="{}"'.format(alt_text) title = kwargs.get('title', None) if title: html += ' title="{}"'.format(title) html += ' src="{}">'.format(url) return html
[ "def", "render_image", "(", "*", "*", "kwargs", ")", ":", "html", "=", "''", "url", "=", "kwargs", ".", "get", "(", "'url'", ",", "None", ")", "if", "url", ":", "html", "=", "'<img'", "alt_text", "=", "kwargs", ".", "get", "(", "'alt_text'", ",", "None", ")", "if", "alt_text", ":", "html", "+=", "' alt=\"{}\"'", ".", "format", "(", "alt_text", ")", "title", "=", "kwargs", ".", "get", "(", "'title'", ",", "None", ")", "if", "title", ":", "html", "+=", "' title=\"{}\"'", ".", "format", "(", "title", ")", "html", "+=", "' src=\"{}\">'", ".", "format", "(", "url", ")", "return", "html" ]
Unstrict template block for rendering an image: <img alt="{alt_text}" title="{title}" src="{url}">
[ "Unstrict", "template", "block", "for", "rendering", "an", "image", ":", "<img", "alt", "=", "{", "alt_text", "}", "title", "=", "{", "title", "}", "src", "=", "{", "url", "}", ">" ]
train
https://github.com/datadesk/slackdown/blob/2c5c2faf2673d0d58183f590f234d2c7e1fe8508/slackdown/templates.py#L79-L100
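Because render_image is an "unstrict" block, any subset of its keyword arguments may be supplied; this sketch (hypothetical URLs) shows how missing attributes are simply dropped from the tag:
from slackdown.templates import render_image

# All attributes present.
print(render_image(url="https://example.com/a.png",
                   alt_text="An image", title="Example"))
# '<img alt="An image" title="Example" src="https://example.com/a.png">'

# Only a url: alt and title are omitted.
print(render_image(url="https://example.com/a.png"))
# '<img src="https://example.com/a.png">'

# No url at all: the function returns an empty string.
print(render_image(alt_text="ignored"))
# ''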
datadesk/slackdown
slackdown/templates.py
render_twitter
def render_twitter(text, **kwargs): """ Strict template block for rendering twitter embeds. """ author = render_author(**kwargs['author']) metadata = render_metadata(**kwargs['metadata']) image = render_image(**kwargs['image']) html = """ <div class="attachment attachment-twitter"> {author} <p class="twitter-content">{text}</p> {metadata} {image} </div> """.format( author=author, text=text, metadata=metadata, image=image ).strip() return html
python
def render_twitter(text, **kwargs): """ Strict template block for rendering twitter embeds. """ author = render_author(**kwargs['author']) metadata = render_metadata(**kwargs['metadata']) image = render_image(**kwargs['image']) html = """ <div class="attachment attachment-twitter"> {author} <p class="twitter-content">{text}</p> {metadata} {image} </div> """.format( author=author, text=text, metadata=metadata, image=image ).strip() return html
[ "def", "render_twitter", "(", "text", ",", "*", "*", "kwargs", ")", ":", "author", "=", "render_author", "(", "*", "*", "kwargs", "[", "'author'", "]", ")", "metadata", "=", "render_metadata", "(", "*", "*", "kwargs", "[", "'metadata'", "]", ")", "image", "=", "render_image", "(", "*", "*", "kwargs", "[", "'image'", "]", ")", "html", "=", "\"\"\"\n <div class=\"attachment attachment-twitter\">\n {author}\n <p class=\"twitter-content\">{text}</p>\n {metadata}\n {image}\n </div>\n \"\"\"", ".", "format", "(", "author", "=", "author", ",", "text", "=", "text", ",", "metadata", "=", "metadata", ",", "image", "=", "image", ")", ".", "strip", "(", ")", "return", "html" ]
Strict template block for rendering twitter embeds.
[ "Strict", "template", "block", "for", "rendering", "twitter", "embeds", "." ]
train
https://github.com/datadesk/slackdown/blob/2c5c2faf2673d0d58183f590f234d2c7e1fe8508/slackdown/templates.py#L103-L125
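render_twitter is the one "strict" block here: it requires nested 'author', 'metadata', and 'image' dicts and interpolates the sub-blocks into a fixed wrapper. A hypothetical call, assuming the sub-renderers return their html:
from slackdown.templates import render_twitter

html = render_twitter(
    "Hello from Slack!",  # tweet text; everything below is made up
    author={"author_name": "Example User", "author_handle": "@example"},
    metadata={"service_name": "twitter", "timestamp": "12:00 PM"},
    image={},  # no image attached; render_image then contributes ''
)
print(html)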
annayqho/TheCannon
code/lamost/li_giants/find_all_candidates.py
get_model
def get_model(LAB_DIR): """ Cannon model params """ coeffs = np.load("%s/coeffs.npz" %LAB_DIR)['arr_0'] scatters = np.load("%s/scatters.npz" %LAB_DIR)['arr_0'] chisqs = np.load("%s/chisqs.npz" %LAB_DIR)['arr_0'] pivots = np.load("%s/pivots.npz" %LAB_DIR)['arr_0'] return coeffs, scatters, chisqs, pivots
python
def get_model(LAB_DIR): """ Cannon model params """ coeffs = np.load("%s/coeffs.npz" %LAB_DIR)['arr_0'] scatters = np.load("%s/scatters.npz" %LAB_DIR)['arr_0'] chisqs = np.load("%s/chisqs.npz" %LAB_DIR)['arr_0'] pivots = np.load("%s/pivots.npz" %LAB_DIR)['arr_0'] return coeffs, scatters, chisqs, pivots
[ "def", "get_model", "(", "LAB_DIR", ")", ":", "coeffs", "=", "np", ".", "load", "(", "\"%s/coeffs.npz\"", "%", "LAB_DIR", ")", "[", "'arr_0'", "]", "scatters", "=", "np", ".", "load", "(", "\"%s/scatters.npz\"", "%", "LAB_DIR", ")", "[", "'arr_0'", "]", "chisqs", "=", "np", ".", "load", "(", "\"%s/chisqs.npz\"", "%", "LAB_DIR", ")", "[", "'arr_0'", "]", "pivots", "=", "np", ".", "load", "(", "\"%s/pivots.npz\"", "%", "LAB_DIR", ")", "[", "'arr_0'", "]", "return", "coeffs", ",", "scatters", ",", "chisqs", ",", "pivots" ]
Cannon model params
[ "Cannon", "model", "params" ]
train
https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/code/lamost/li_giants/find_all_candidates.py#L50-L56
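Each .npz here holds one positionally saved array, which is why every load indexes 'arr_0'. A round-trip sketch with toy arrays (the shapes are illustrative only), assuming get_model above is in scope:
import numpy as np

LAB_DIR = "."  # hypothetical directory for the model files
np.savez("%s/coeffs.npz" % LAB_DIR, np.zeros((3626, 21)))
np.savez("%s/scatters.npz" % LAB_DIR, np.zeros(3626))
np.savez("%s/chisqs.npz" % LAB_DIR, np.zeros(3626))
np.savez("%s/pivots.npz" % LAB_DIR, np.zeros(5))

coeffs, scatters, chisqs, pivots = get_model(LAB_DIR)
print(coeffs.shape, scatters.shape, pivots.shape)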
annayqho/TheCannon
code/lamost/li_giants/find_all_candidates.py
get_labels
def get_labels(ids_find): """ Labels to make Cannon model spectra """ a = pyfits.open("%s/lamost_catalog_full.fits" %LAB_DIR) data = a[1].data a.close() id_all = data['lamost_id'] id_all = np.array(id_all) id_all = np.array([val.strip() for val in id_all]) snr_all = data['cannon_snrg'] chisq_all = data['cannon_chisq'] teff = data['cannon_teff'] logg = data['cannon_logg'] feh = data['cannon_m_h'] afe = data['cannon_alpha_m'] ak = data['cannon_a_k'] labels = np.vstack((teff,logg,feh,afe,ak)) choose = np.in1d(id_all, ids_find) id_choose = id_all[choose] label_choose = labels[:,choose] snr_choose = snr_all[choose] chisq_choose = chisq_all[choose] inds = np.array([np.where(id_choose==val)[0][0] for val in ids_find]) print(id_choose[inds][100]) print(ids_find[100]) return label_choose[:,inds], snr_choose[inds], chisq_choose[inds]
python
def get_labels(ids_find): """ Labels to make Cannon model spectra """ a = pyfits.open("%s/lamost_catalog_full.fits" %LAB_DIR) data = a[1].data a.close() id_all = data['lamost_id'] id_all = np.array(id_all) id_all = np.array([val.strip() for val in id_all]) snr_all = data['cannon_snrg'] chisq_all = data['cannon_chisq'] teff = data['cannon_teff'] logg = data['cannon_logg'] feh = data['cannon_m_h'] afe = data['cannon_alpha_m'] ak = data['cannon_a_k'] labels = np.vstack((teff,logg,feh,afe,ak)) choose = np.in1d(id_all, ids_find) id_choose = id_all[choose] label_choose = labels[:,choose] snr_choose = snr_all[choose] chisq_choose = chisq_all[choose] inds = np.array([np.where(id_choose==val)[0][0] for val in ids_find]) print(id_choose[inds][100]) print(ids_find[100]) return label_choose[:,inds], snr_choose[inds], chisq_choose[inds]
[ "def", "get_labels", "(", "ids_find", ")", ":", "a", "=", "pyfits", ".", "open", "(", "\"%s/lamost_catalog_full.fits\"", "%", "LAB_DIR", ")", "data", "=", "a", "[", "1", "]", ".", "data", "a", ".", "close", "(", ")", "id_all", "=", "data", "[", "'lamost_id'", "]", "id_all", "=", "np", ".", "array", "(", "id_all", ")", "id_all", "=", "np", ".", "array", "(", "[", "val", ".", "strip", "(", ")", "for", "val", "in", "id_all", "]", ")", "snr_all", "=", "data", "[", "'cannon_snrg'", "]", "chisq_all", "=", "data", "[", "'cannon_chisq'", "]", "teff", "=", "data", "[", "'cannon_teff'", "]", "logg", "=", "data", "[", "'cannon_logg'", "]", "feh", "=", "data", "[", "'cannon_m_h'", "]", "afe", "=", "data", "[", "'cannon_alpha_m'", "]", "ak", "=", "data", "[", "'cannon_a_k'", "]", "labels", "=", "np", ".", "vstack", "(", "(", "teff", ",", "logg", ",", "feh", ",", "afe", ",", "ak", ")", ")", "choose", "=", "np", ".", "in1d", "(", "id_all", ",", "ids_find", ")", "id_choose", "=", "id_all", "[", "choose", "]", "label_choose", "=", "labels", "[", ":", ",", "choose", "]", "snr_choose", "=", "snr_all", "[", "choose", "]", "chisq_choose", "=", "chisq_all", "[", "choose", "]", "inds", "=", "np", ".", "array", "(", "[", "np", ".", "where", "(", "id_choose", "==", "val", ")", "[", "0", "]", "[", "0", "]", "for", "val", "in", "ids_find", "]", ")", "print", "(", "id_choose", "[", "inds", "]", "[", "100", "]", ")", "print", "(", "ids_find", "[", "100", "]", ")", "return", "label_choose", "[", ":", ",", "inds", "]", ",", "snr_choose", "[", "inds", "]", ",", "chisq_choose", "[", "inds", "]" ]
Labels to make Cannon model spectra
[ "Labels", "to", "make", "Cannon", "model", "spectra" ]
train
https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/code/lamost/li_giants/find_all_candidates.py#L59-L83
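The alignment idiom in get_labels (np.in1d to subset the catalog, then a per-ID np.where to put rows back in the caller's requested order) stands on its own; a self-contained toy with made-up IDs:
import numpy as np

id_all = np.array(["spec-c", "spec-a", "spec-b"])  # catalog order
labels = np.arange(6).reshape(2, 3)                # 2 labels x 3 objects
ids_find = np.array(["spec-a", "spec-b"])          # requested order

choose = np.in1d(id_all, ids_find)   # boolean mask over the catalog
id_choose = id_all[choose]           # subset, still in catalog order
label_choose = labels[:, choose]

# Map each requested ID to its row in the subset to restore the order.
inds = np.array([np.where(id_choose == val)[0][0] for val in ids_find])
print(label_choose[:, inds])  # columns now ordered spec-a, spec-b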
annayqho/TheCannon
code/lamost/li_giants/find_all_candidates.py
get_normed_spectra
def get_normed_spectra(): """ Spectra to compare with models """ wl = np.load("%s/wl.npz" %LAB_DIR)['arr_0'] filenames = np.array( [SPEC_DIR + "/Spectra" + "/" + val for val in lamost_id]) grid, fluxes, ivars, npix, SNRs = lamost.load_spectra( lamost_id, input_grid=wl) ds = dataset.Dataset( wl, lamost_id, fluxes, ivars, [1], lamost_id[0:2], fluxes[0:2], ivars[0:2]) ds.continuum_normalize_gaussian_smoothing(L=50) np.savez(SPEC_DIR + "/" + "norm_flux.npz", ds.tr_flux) np.savez(SPEC_DIR + "/" + "norm_ivar.npz", ds.tr_ivar) return ds.tr_flux, ds.tr_ivar
python
def get_normed_spectra(): """ Spectra to compare with models """ wl = np.load("%s/wl.npz" %LAB_DIR)['arr_0'] filenames = np.array( [SPEC_DIR + "/Spectra" + "/" + val for val in lamost_id]) grid, fluxes, ivars, npix, SNRs = lamost.load_spectra( lamost_id, input_grid=wl) ds = dataset.Dataset( wl, lamost_id, fluxes, ivars, [1], lamost_id[0:2], fluxes[0:2], ivars[0:2]) ds.continuum_normalize_gaussian_smoothing(L=50) np.savez(SPEC_DIR + "/" + "norm_flux.npz", ds.tr_flux) np.savez(SPEC_DIR + "/" + "norm_ivar.npz", ds.tr_ivar) return ds.tr_flux, ds.tr_ivar
[ "def", "get_normed_spectra", "(", ")", ":", "wl", "=", "np", ".", "load", "(", "\"%s/wl.npz\"", "%", "LAB_DIR", ")", "[", "'arr_0'", "]", "filenames", "=", "np", ".", "array", "(", "[", "SPEC_DIR", "+", "\"/Spectra\"", "+", "\"/\"", "+", "val", "for", "val", "in", "lamost_id", "]", ")", "grid", ",", "fluxes", ",", "ivars", ",", "npix", ",", "SNRs", "=", "lamost", ".", "load_spectra", "(", "lamost_id", ",", "input_grid", "=", "wl", ")", "ds", "=", "dataset", ".", "Dataset", "(", "wl", ",", "lamost_id", ",", "fluxes", ",", "ivars", ",", "[", "1", "]", ",", "lamost_id", "[", "0", ":", "2", "]", ",", "fluxes", "[", "0", ":", "2", "]", ",", "ivars", "[", "0", ":", "2", "]", ")", "ds", ".", "continuum_normalize_gaussian_smoothing", "(", "L", "=", "50", ")", "np", ".", "savez", "(", "SPEC_DIR", "+", "\"/\"", "+", "\"norm_flux.npz\"", ",", "ds", ".", "tr_flux", ")", "np", ".", "savez", "(", "SPEC_DIR", "+", "\"/\"", "+", "\"norm_ivar.npz\"", ",", "ds", ".", "tr_ivar", ")", "return", "ds", ".", "tr_flux", ",", "ds", ".", "tr_ivar" ]
Spectra to compare with models
[ "Spectra", "to", "compare", "with", "models" ]
train
https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/code/lamost/li_giants/find_all_candidates.py#L86-L99
annayqho/TheCannon
code/lamost/li_giants/find_all_candidates.py
wget_files
def wget_files(): """ Pull the files from the LAMOST archive """ for f in lamost_id: short = (f.split('-')[2]).split('_')[0] filename = "%s/%s.gz" %(short,f) DIR = "/Users/annaho/Data/Li_Giants/Spectra_APOKASC" searchfor = "%s/%s.gz" %(DIR,f) if glob.glob(searchfor): print("done") else: #print(searchfor) os.system( "wget http://dr2.lamost.org/sas/fits/%s" %(filename)) new_filename = filename.split("_")[0] + "_" + filename.split("_")[2] os.system( "wget http://dr2.lamost.org/sas/fits/%s" %(new_filename))
python
def wget_files(): """ Pull the files from the LAMOST archive """ for f in lamost_id: short = (f.split('-')[2]).split('_')[0] filename = "%s/%s.gz" %(short,f) DIR = "/Users/annaho/Data/Li_Giants/Spectra_APOKASC" searchfor = "%s/%s.gz" %(DIR,f) if glob.glob(searchfor): print("done") else: #print(searchfor) os.system( "wget http://dr2.lamost.org/sas/fits/%s" %(filename)) new_filename = filename.split("_")[0] + "_" + filename.split("_")[2] os.system( "wget http://dr2.lamost.org/sas/fits/%s" %(new_filename))
[ "def", "wget_files", "(", ")", ":", "for", "f", "in", "lamost_id", ":", "short", "=", "(", "f", ".", "split", "(", "'-'", ")", "[", "2", "]", ")", ".", "split", "(", "'_'", ")", "[", "0", "]", "filename", "=", "\"%s/%s.gz\"", "%", "(", "short", ",", "f", ")", "DIR", "=", "\"/Users/annaho/Data/Li_Giants/Spectra_APOKASC\"", "searchfor", "=", "\"%s/%s.gz\"", "%", "(", "DIR", ",", "f", ")", "if", "glob", ".", "glob", "(", "searchfor", ")", ":", "print", "(", "\"done\"", ")", "else", ":", "#print(searchfor)", "os", ".", "system", "(", "\"wget http://dr2.lamost.org/sas/fits/%s\"", "%", "(", "filename", ")", ")", "new_filename", "=", "filename", ".", "split", "(", "\"_\"", ")", "[", "0", "]", "+", "\"_\"", "+", "filename", ".", "split", "(", "\"_\"", ")", "[", "2", "]", "os", ".", "system", "(", "\"wget http://dr2.lamost.org/sas/fits/%s\"", "%", "(", "new_filename", ")", ")" ]
Pull the files from the LAMOST archive
[ "Pull", "the", "files", "from", "the", "LAMOST", "archive" ]
train
https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/code/lamost/li_giants/find_all_candidates.py#L102-L117
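os.system gives no feedback when a download fails; a hedged alternative sketch using subprocess.run with the same dr2.lamost.org URL pattern (the filename below is made up):
import subprocess

filename = "F5902/spec-55859-F5902_sp01-001.fits.gz"  # hypothetical
result = subprocess.run(
    ["wget", "http://dr2.lamost.org/sas/fits/%s" % filename],
    capture_output=True,
)
if result.returncode != 0:
    print("download failed:", result.stderr.decode()[:200])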
annayqho/TheCannon
code/lamost/li_giants/find_apokasc_candidates.py
get_labels
def get_labels(): """ Labels to make Cannon model spectra """ cannon_teff = data['cannon_teff_2'] cannon_logg = data['cannon_logg_2'] cannon_m_h = data['cannon_m_h'] cannon_alpha_m = data['cannon_alpha_m'] cannon_a_k = data['cannon_a_k'] labels = np.vstack( (cannon_teff, cannon_logg, cannon_m_h, cannon_alpha_m, cannon_a_k)) cannon_chisq = data['cannon_chisq'] np.savez(DATA_DIR + "chisq.npz", labels) np.savez(DATA_DIR + "labels.npz", labels) snrg = data['cannon_snrg'] # snrg * 3 np.savez("snr.npz", snrg) return labels.T
python
def get_labels(): """ Labels to make Cannon model spectra """ cannon_teff = data['cannon_teff_2'] cannon_logg = data['cannon_logg_2'] cannon_m_h = data['cannon_m_h'] cannon_alpha_m = data['cannon_alpha_m'] cannon_a_k = data['cannon_a_k'] labels = np.vstack( (cannon_teff, cannon_logg, cannon_m_h, cannon_alpha_m, cannon_a_k)) cannon_chisq = data['cannon_chisq'] np.savez(DATA_DIR + "chisq.npz", labels) np.savez(DATA_DIR + "labels.npz", labels) snrg = data['cannon_snrg'] # snrg * 3 np.savez("snr.npz", snrg) return labels.T
[ "def", "get_labels", "(", ")", ":", "cannon_teff", "=", "data", "[", "'cannon_teff_2'", "]", "cannon_logg", "=", "data", "[", "'cannon_logg_2'", "]", "cannon_m_h", "=", "data", "[", "'cannon_m_h'", "]", "cannon_alpha_m", "=", "data", "[", "'cannon_alpha_m'", "]", "cannon_a_k", "=", "data", "[", "'cannon_a_k'", "]", "labels", "=", "np", ".", "vstack", "(", "(", "cannon_teff", ",", "cannon_logg", ",", "cannon_m_h", ",", "cannon_alpha_m", ",", "cannon_a_k", ")", ")", "cannon_chisq", "=", "data", "[", "'cannon_chisq'", "]", "np", ".", "savez", "(", "DATA_DIR", "+", "\"chisq.npz\"", ",", "labels", ")", "np", ".", "savez", "(", "DATA_DIR", "+", "\"labels.npz\"", ",", "labels", ")", "snrg", "=", "data", "[", "'cannon_snrg'", "]", "# snrg * 3", "np", ".", "savez", "(", "\"snr.npz\"", ",", "snrg", ")", "return", "labels", ".", "T" ]
Labels to make Cannon model spectra
[ "Labels", "to", "make", "Cannon", "model", "spectra" ]
train
https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/code/lamost/li_giants/find_apokasc_candidates.py#L30-L44
annayqho/TheCannon
code/lamost/abundances/calc_gradient_spectra.py
cannon_normalize
def cannon_normalize(spec_raw): """ Normalize according to The Cannon """ spec = np.array([spec_raw]) wl = np.arange(0, spec.shape[1]) w = continuum_normalization.gaussian_weight_matrix(wl, L=50) ivar = np.ones(spec.shape)*0.5 cont = continuum_normalization._find_cont_gaussian_smooth( wl, spec, ivar, w) norm_flux, norm_ivar = continuum_normalization._cont_norm( spec, ivar, cont) return norm_flux[0]
python
def cannon_normalize(spec_raw): """ Normalize according to The Cannon """ spec = np.array([spec_raw]) wl = np.arange(0, spec.shape[1]) w = continuum_normalization.gaussian_weight_matrix(wl, L=50) ivar = np.ones(spec.shape)*0.5 cont = continuum_normalization._find_cont_gaussian_smooth( wl, spec, ivar, w) norm_flux, norm_ivar = continuum_normalization._cont_norm( spec, ivar, cont) return norm_flux[0]
[ "def", "cannon_normalize", "(", "spec_raw", ")", ":", "spec", "=", "np", ".", "array", "(", "[", "spec_raw", "]", ")", "wl", "=", "np", ".", "arange", "(", "0", ",", "spec", ".", "shape", "[", "1", "]", ")", "w", "=", "continuum_normalization", ".", "gaussian_weight_matrix", "(", "wl", ",", "L", "=", "50", ")", "ivar", "=", "np", ".", "ones", "(", "spec", ".", "shape", ")", "*", "0.5", "cont", "=", "continuum_normalization", ".", "_find_cont_gaussian_smooth", "(", "wl", ",", "spec", ",", "ivar", ",", "w", ")", "norm_flux", ",", "norm_ivar", "=", "continuum_normalization", ".", "_cont_norm", "(", "spec", ",", "ivar", ",", "cont", ")", "return", "norm_flux", "[", "0", "]" ]
Normalize according to The Cannon
[ "Normalize", "according", "to", "The", "Cannon" ]
train
https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/code/lamost/abundances/calc_gradient_spectra.py#L25-L35
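A self-contained toy of the same technique (divide the spectrum by a broad Gaussian-smoothed continuum estimate), using scipy's gaussian_filter1d in place of TheCannon's weighted smoother; this is a sketch of the idea, not the package's exact implementation:
import numpy as np
from scipy.ndimage import gaussian_filter1d

# Synthetic spectrum: sloped continuum plus one narrow absorption line.
wl = np.linspace(4000, 7000, 1000)
continuum = 1.0 + 0.3 * (wl - 4000) / 3000
spec = continuum * (1 - 0.4 * np.exp(-0.5 * ((wl - 5500) / 5) ** 2))

# Broad smooth (~L=50 pixels) approximates the continuum; divide it out.
cont_est = gaussian_filter1d(spec, sigma=50)
norm_flux = spec / cont_est
print(norm_flux.min())  # the absorption dip survives; continuum sits near 1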
annayqho/TheCannon
code/lamost/abundances/calc_gradient_spectra.py
resample
def resample(grid, wl, flux): """ Resample spectrum onto desired grid """ flux_rs = (interpolate.interp1d(wl, flux))(grid) return flux_rs
python
def resample(grid, wl, flux): """ Resample spectrum onto desired grid """ flux_rs = (interpolate.interp1d(wl, flux))(grid) return flux_rs
[ "def", "resample", "(", "grid", ",", "wl", ",", "flux", ")", ":", "flux_rs", "=", "(", "interpolate", ".", "interp1d", "(", "wl", ",", "flux", ")", ")", "(", "grid", ")", "return", "flux_rs" ]
Resample spectrum onto desired grid
[ "Resample", "spectrum", "onto", "desired", "grid" ]
train
https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/code/lamost/abundances/calc_gradient_spectra.py#L38-L41
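A tiny usage sketch; interp1d raises outside the input range, so the target grid must lie within [wl.min(), wl.max()]:
import numpy as np
from scipy import interpolate

wl = np.array([4000., 4010., 4020., 4030.])
flux = np.array([1.0, 0.8, 0.9, 1.0])
grid = np.linspace(4000., 4030., 7)  # desired common grid

flux_rs = (interpolate.interp1d(wl, flux))(grid)
print(flux_rs)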
annayqho/TheCannon
code/lamost/abundances/calc_gradient_spectra.py
gen_cannon_grad_spec
def gen_cannon_grad_spec(choose, coeffs, pivots): """ Generate Cannon gradient spectra Parameters ---------- labels: default values for [teff, logg, feh, cfe, nfe, afe, ak] choose: val of cfe or nfe, whatever you're varying low: lowest val of cfe or nfe, whatever you're varying high: highest val of cfe or nfe, whatever you're varying """ base_labels = [4800, 2.5, 0.03, 0.10, -0.17, -0.17, 0, -0.16, -0.13, -0.15, 0.13, 0.08, 0.17, -0.062] label_names = np.array( ['TEFF', 'LOGG', 'AK', 'Al', 'Ca', 'C', 'Fe', 'Mg', 'Mn', 'Ni', 'N', 'O', 'Si', 'Ti']) label_atnum = np.array( [0, 1, -1, 13, 20, 6, 26, 12, 25, 28, 7, 8, 14, 22]) # Generate Cannon gradient spectra ind = np.where(label_atnum==choose)[0][0] low_lab = copy.copy(base_labels) high = base_labels[ind] if choose > 0: low = base_labels[ind] - 0.2 else: #temperature if choose != 0: print("warning...") low = base_labels[ind] - 200 low_lab[ind] = low lvec = (train_model._get_lvec(np.array([low_lab]), pivots))[0] model_low = np.dot(coeffs, lvec) lvec = (train_model._get_lvec(np.array([base_labels]), pivots))[0] model_high = np.dot(coeffs, lvec) grad_spec = (model_high - model_low) / (high - low) return grad_spec
python
def gen_cannon_grad_spec(choose, coeffs, pivots): """ Generate Cannon gradient spectra Parameters ---------- labels: default values for [teff, logg, feh, cfe, nfe, afe, ak] choose: val of cfe or nfe, whatever you're varying low: lowest val of cfe or nfe, whatever you're varying high: highest val of cfe or nfe, whatever you're varying """ base_labels = [4800, 2.5, 0.03, 0.10, -0.17, -0.17, 0, -0.16, -0.13, -0.15, 0.13, 0.08, 0.17, -0.062] label_names = np.array( ['TEFF', 'LOGG', 'AK', 'Al', 'Ca', 'C', 'Fe', 'Mg', 'Mn', 'Ni', 'N', 'O', 'Si', 'Ti']) label_atnum = np.array( [0, 1, -1, 13, 20, 6, 26, 12, 25, 28, 7, 8, 14, 22]) # Generate Cannon gradient spectra ind = np.where(label_atnum==choose)[0][0] low_lab = copy.copy(base_labels) high = base_labels[ind] if choose > 0: low = base_labels[ind] - 0.2 else: #temperature if choose != 0: print("warning...") low = base_labels[ind] - 200 low_lab[ind] = low lvec = (train_model._get_lvec(np.array([low_lab]), pivots))[0] model_low = np.dot(coeffs, lvec) lvec = (train_model._get_lvec(np.array([base_labels]), pivots))[0] model_high = np.dot(coeffs, lvec) grad_spec = (model_high - model_low) / (high - low) return grad_spec
[ "def", "gen_cannon_grad_spec", "(", "choose", ",", "coeffs", ",", "pivots", ")", ":", "base_labels", "=", "[", "4800", ",", "2.5", ",", "0.03", ",", "0.10", ",", "-", "0.17", ",", "-", "0.17", ",", "0", ",", "-", "0.16", ",", "-", "0.13", ",", "-", "0.15", ",", "0.13", ",", "0.08", ",", "0.17", ",", "-", "0.062", "]", "label_names", "=", "np", ".", "array", "(", "[", "'TEFF'", ",", "'LOGG'", ",", "'AK'", ",", "'Al'", ",", "'Ca'", ",", "'C'", ",", "'Fe'", ",", "'Mg'", ",", "'Mn'", ",", "'Ni'", ",", "'N'", ",", "'O'", ",", "'Si'", ",", "'Ti'", "]", ")", "label_atnum", "=", "np", ".", "array", "(", "[", "0", ",", "1", ",", "-", "1", ",", "13", ",", "20", ",", "6", ",", "26", ",", "12", ",", "25", ",", "28", ",", "7", ",", "8", ",", "14", ",", "22", "]", ")", "# Generate Cannon gradient spectra", "ind", "=", "np", ".", "where", "(", "label_atnum", "==", "choose", ")", "[", "0", "]", "[", "0", "]", "low_lab", "=", "copy", ".", "copy", "(", "base_labels", ")", "high", "=", "base_labels", "[", "ind", "]", "if", "choose", ">", "0", ":", "low", "=", "base_labels", "[", "ind", "]", "-", "0.2", "else", ":", "#temperature", "if", "choose", "!=", "0", ":", "print", "(", "\"warning...\"", ")", "low", "=", "base_labels", "[", "ind", "]", "-", "200", "low_lab", "[", "ind", "]", "=", "low", "lvec", "=", "(", "train_model", ".", "_get_lvec", "(", "np", ".", "array", "(", "[", "low_lab", "]", ")", ",", "pivots", ")", ")", "[", "0", "]", "model_low", "=", "np", ".", "dot", "(", "coeffs", ",", "lvec", ")", "lvec", "=", "(", "train_model", ".", "_get_lvec", "(", "np", ".", "array", "(", "[", "base_labels", "]", ")", ",", "pivots", ")", ")", "[", "0", "]", "model_high", "=", "np", ".", "dot", "(", "coeffs", ",", "lvec", ")", "grad_spec", "=", "(", "model_high", "-", "model_low", ")", "/", "(", "high", "-", "low", ")", "return", "grad_spec" ]
Generate Cannon gradient spectra Parameters ---------- labels: default values for [teff, logg, feh, cfe, nfe, afe, ak] choose: val of cfe or nfe, whatever you're varying low: lowest val of cfe or nfe, whatever you're varying high: highest val of cfe or nfe, whatever you're varying
[ "Generate", "Cannon", "gradient", "spectra" ]
train
https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/code/lamost/abundances/calc_gradient_spectra.py#L62-L94
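The gradient spectrum is a finite difference of two model spectra through the label vector; a self-contained toy using a purely linear stand-in for train_model._get_lvec, to show the (high - low) normalization:
import numpy as np

def get_lvec(labels, pivots):
    # Linear stand-in for the Cannon's label vector (the real one is quadratic).
    return np.hstack(([1.0], labels - pivots))

pivots = np.array([4800.0, 2.5])
coeffs = np.random.RandomState(0).normal(size=(100, 3))  # 100 toy pixels

base = np.array([4800.0, 2.5])
low = base.copy()
low[0] -= 200.0  # perturb Teff downward, as the source does

model_low = coeffs @ get_lvec(low, pivots)
model_high = coeffs @ get_lvec(base, pivots)
grad_spec = (model_high - model_low) / (base[0] - low[0])

# For a linear model the gradient is exactly the Teff coefficient column.
print(np.allclose(grad_spec, coeffs[:, 1]))  # True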
annayqho/TheCannon
code/lamost/abundances/calc_gradient_spectra.py
get_model_spec_ting
def get_model_spec_ting(atomic_number): """ X_u_template[0:2] are teff, logg, vturb in km/s X_u_template[:,3] -> onward, put atomic number atomic_number is 6 for C, 7 for N """ DATA_DIR = "/Users/annaho/Data/LAMOST/Mass_And_Age" temp = np.load("%s/X_u_template_KGh_res=1800.npz" %DATA_DIR) X_u_template = temp["X_u_template"] wl = temp["wavelength"] grad_spec = X_u_template[:,atomic_number] return wl, grad_spec
python
def get_model_spec_ting(atomic_number): """ X_u_template[0:2] are teff, logg, vturb in km/s X_u_template[:,3] -> onward, put atomic number atomic_number is 6 for C, 7 for N """ DATA_DIR = "/Users/annaho/Data/LAMOST/Mass_And_Age" temp = np.load("%s/X_u_template_KGh_res=1800.npz" %DATA_DIR) X_u_template = temp["X_u_template"] wl = temp["wavelength"] grad_spec = X_u_template[:,atomic_number] return wl, grad_spec
[ "def", "get_model_spec_ting", "(", "atomic_number", ")", ":", "DATA_DIR", "=", "\"/Users/annaho/Data/LAMOST/Mass_And_Age\"", "temp", "=", "np", ".", "load", "(", "\"%s/X_u_template_KGh_res=1800.npz\"", "%", "DATA_DIR", ")", "X_u_template", "=", "temp", "[", "\"X_u_template\"", "]", "wl", "=", "temp", "[", "\"wavelength\"", "]", "grad_spec", "=", "X_u_template", "[", ":", ",", "atomic_number", "]", "return", "wl", ",", "grad_spec" ]
X_u_template[0:2] are teff, logg, vturb in km/s X_u_template[:,3] -> onward, put atomic number atomic_number is 6 for C, 7 for N
[ "X_u_template", "[", "0", ":", "2", "]", "are", "teff", "logg", "vturb", "in", "km", "/", "s", "X_u_template", "[", ":", "3", "]", "-", ">", "onward", "put", "atomic", "number", "atomic_number", "is", "6", "for", "C", "7", "for", "N" ]
train
https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/code/lamost/abundances/calc_gradient_spectra.py#L129-L140
annayqho/TheCannon
code/lamost/li_giants/residuals.py
get_residuals
def get_residuals(ds, m): """ Using the dataset and model object, calculate the residuals and return Parameters ---------- ds: dataset object m: model object Return ------ residuals: array of residuals, spec minus model spec """ model_spectra = get_model_spectra(ds, m) resid = ds.test_flux - model_spectra return resid
python
def get_residuals(ds, m): """ Using the dataset and model object, calculate the residuals and return Parameters ---------- ds: dataset object m: model object Return ------ residuals: array of residuals, spec minus model spec """ model_spectra = get_model_spectra(ds, m) resid = ds.test_flux - model_spectra return resid
[ "def", "get_residuals", "(", "ds", ",", "m", ")", ":", "model_spectra", "=", "get_model_spectra", "(", "ds", ",", "m", ")", "resid", "=", "ds", ".", "test_flux", "-", "model_spectra", "return", "resid" ]
Using the dataset and model object, calculate the residuals and return Parameters ---------- ds: dataset object m: model object Return ------ residuals: array of residuals, spec minus model spec
[ "Using", "the", "dataset", "and", "model", "object", "calculate", "the", "residuals", "and", "return" ]
train
https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/code/lamost/li_giants/residuals.py#L27-L40
annayqho/TheCannon
code/lamost/li_giants/residuals.py
load_model
def load_model(): """ Load the model Parameters ---------- direc: directory with all of the model files Returns ------- m: model object """ direc = "/home/annaho/TheCannon/code/lamost/mass_age/cn" m = model.CannonModel(2) m.coeffs = np.load(direc + "/coeffs.npz")['arr_0'][0:3626,:] # no cols m.scatters = np.load(direc + "/scatters.npz")['arr_0'][0:3626] # no cols m.chisqs = np.load(direc + "/chisqs.npz")['arr_0'][0:3626] # no cols m.pivots = np.load(direc + "/pivots.npz")['arr_0'] return m
python
def load_model(): """ Load the model Parameters ---------- direc: directory with all of the model files Returns ------- m: model object """ direc = "/home/annaho/TheCannon/code/lamost/mass_age/cn" m = model.CannonModel(2) m.coeffs = np.load(direc + "/coeffs.npz")['arr_0'][0:3626,:] # no cols m.scatters = np.load(direc + "/scatters.npz")['arr_0'][0:3626] # no cols m.chisqs = np.load(direc + "/chisqs.npz")['arr_0'][0:3626] # no cols m.pivots = np.load(direc + "/pivots.npz")['arr_0'] return m
[ "def", "load_model", "(", ")", ":", "direc", "=", "\"/home/annaho/TheCannon/code/lamost/mass_age/cn\"", "m", "=", "model", ".", "CannonModel", "(", "2", ")", "m", ".", "coeffs", "=", "np", ".", "load", "(", "direc", "+", "\"/coeffs.npz\"", ")", "[", "'arr_0'", "]", "[", "0", ":", "3626", ",", ":", "]", "# no cols", "m", ".", "scatters", "=", "np", ".", "load", "(", "direc", "+", "\"/scatters.npz\"", ")", "[", "'arr_0'", "]", "[", "0", ":", "3626", "]", "# no cols", "m", ".", "chisqs", "=", "np", ".", "load", "(", "direc", "+", "\"/chisqs.npz\"", ")", "[", "'arr_0'", "]", "[", "0", ":", "3626", "]", "# no cols", "m", ".", "pivots", "=", "np", ".", "load", "(", "direc", "+", "\"/pivots.npz\"", ")", "[", "'arr_0'", "]", "return", "m" ]
Load the model Parameters ---------- direc: directory with all of the model files Returns ------- m: model object
[ "Load", "the", "model" ]
train
https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/code/lamost/li_giants/residuals.py#L48-L65
annayqho/TheCannon
code/lamost/li_giants/residuals.py
load_dataset
def load_dataset(date): """ Load the dataset for a single date Parameters ---------- date: the date (string) for which to load the data & dataset Returns ------- ds: the dataset object """ LAB_DIR = "/home/annaho/TheCannon/data/lamost" WL_DIR = "/home/annaho/TheCannon/code/lamost/mass_age/cn" SPEC_DIR = "/home/annaho/TheCannon/code/apogee_lamost/xcalib_4labels/output" wl = np.load(WL_DIR + "/wl_cols.npz")['arr_0'][0:3626] # no cols ds = dataset.Dataset(wl, [], [], [], [], [], [], []) test_label = np.load("%s/%s_all_cannon_labels.npz" %(LAB_DIR,date))['arr_0'] ds.test_label_vals = test_label a = np.load("%s/%s_norm.npz" %(SPEC_DIR,date)) ds.test_flux = a['arr_0'] ds.test_ivar = a['arr_1'] ds.test_ID = np.load("%s/%s_ids.npz" %(SPEC_DIR,date))['arr_0'] return ds
python
def load_dataset(date): """ Load the dataset for a single date Parameters ---------- date: the date (string) for which to load the data & dataset Returns ------- ds: the dataset object """ LAB_DIR = "/home/annaho/TheCannon/data/lamost" WL_DIR = "/home/annaho/TheCannon/code/lamost/mass_age/cn" SPEC_DIR = "/home/annaho/TheCannon/code/apogee_lamost/xcalib_4labels/output" wl = np.load(WL_DIR + "/wl_cols.npz")['arr_0'][0:3626] # no cols ds = dataset.Dataset(wl, [], [], [], [], [], [], []) test_label = np.load("%s/%s_all_cannon_labels.npz" %(LAB_DIR,date))['arr_0'] ds.test_label_vals = test_label a = np.load("%s/%s_norm.npz" %(SPEC_DIR,date)) ds.test_flux = a['arr_0'] ds.test_ivar = a['arr_1'] ds.test_ID = np.load("%s/%s_ids.npz" %(SPEC_DIR,date))['arr_0'] return ds
[ "def", "load_dataset", "(", "date", ")", ":", "LAB_DIR", "=", "\"/home/annaho/TheCannon/data/lamost\"", "WL_DIR", "=", "\"/home/annaho/TheCannon/code/lamost/mass_age/cn\"", "SPEC_DIR", "=", "\"/home/annaho/TheCannon/code/apogee_lamost/xcalib_4labels/output\"", "wl", "=", "np", ".", "load", "(", "WL_DIR", "+", "\"/wl_cols.npz\"", ")", "[", "'arr_0'", "]", "[", "0", ":", "3626", "]", "# no cols", "ds", "=", "dataset", ".", "Dataset", "(", "wl", ",", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ")", "test_label", "=", "np", ".", "load", "(", "\"%s/%s_all_cannon_labels.npz\"", "%", "(", "LAB_DIR", ",", "date", ")", ")", "[", "'arr_0'", "]", "ds", ".", "test_label_vals", "=", "test_label", "a", "=", "np", ".", "load", "(", "\"%s/%s_norm.npz\"", "%", "(", "SPEC_DIR", ",", "date", ")", ")", "ds", ".", "test_flux", "=", "a", "[", "'arr_0'", "]", "ds", ".", "test_ivar", "=", "a", "[", "'arr_1'", "]", "ds", ".", "test_ID", "=", "np", ".", "load", "(", "\"%s/%s_ids.npz\"", "%", "(", "SPEC_DIR", ",", "date", ")", ")", "[", "'arr_0'", "]", "return", "ds" ]
Load the dataset for a single date Parameters ---------- date: the date (string) for which to load the data & dataset Returns ------- ds: the dataset object
[ "Load", "the", "dataset", "for", "a", "single", "date", "Parameters", "----------", "date", ":", "the", "date", "(", "string", ")", "for", "which", "to", "load", "the", "data", "&", "dataset" ]
train
https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/code/lamost/li_giants/residuals.py#L68-L90
annayqho/TheCannon
code/lamost/li_giants/residuals.py
fit_gaussian
def fit_gaussian(x, y, yerr, p0): """ Fit a Gaussian to the data """ try: popt, pcov = curve_fit(gaussian, x, y, sigma=yerr, p0=p0, absolute_sigma=True) except RuntimeError: return [0],[0] return popt, pcov
python
def fit_gaussian(x, y, yerr, p0): """ Fit a Gaussian to the data """ try: popt, pcov = curve_fit(gaussian, x, y, sigma=yerr, p0=p0, absolute_sigma=True) except RuntimeError: return [0],[0] return popt, pcov
[ "def", "fit_gaussian", "(", "x", ",", "y", ",", "yerr", ",", "p0", ")", ":", "try", ":", "popt", ",", "pcov", "=", "curve_fit", "(", "gaussian", ",", "x", ",", "y", ",", "sigma", "=", "yerr", ",", "p0", "=", "p0", ",", "absolute_sigma", "=", "True", ")", "except", "RuntimeError", ":", "return", "[", "0", "]", ",", "[", "0", "]", "return", "popt", ",", "pcov" ]
Fit a Gaussian to the data
[ "Fit", "a", "Gaussian", "to", "the", "data" ]
train
https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/code/lamost/li_giants/residuals.py#L93-L99
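fit_gaussian leans on a gaussian(x, ...) model defined elsewhere in the module; a self-contained sketch that supplies one (the amplitude/center/width parameterization is an assumption) and fits noisy data:
import numpy as np
from scipy.optimize import curve_fit

def gaussian(x, a, b, c):
    # Assumed form: amplitude a, center b, width c.
    return a * np.exp(-(x - b) ** 2 / (2 * c ** 2))

rng = np.random.RandomState(0)
x = np.linspace(-5, 5, 100)
yerr = 0.05 * np.ones_like(x)
y = gaussian(x, -0.3, 0.0, 1.0) + rng.normal(0, 0.05, x.size)

popt, pcov = curve_fit(gaussian, x, y, sigma=yerr, p0=[-0.3, 0.0, 1.0],
                       absolute_sigma=True)
print(popt)                    # fitted amplitude, center, width
print(np.sqrt(np.diag(pcov)))  # their 1-sigma uncertainties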
annayqho/TheCannon
code/lamost/li_giants/residuals.py
select
def select(yerrs, amps, amp_errs, widths): """ criteria for keeping an object """ keep_1 = np.logical_and(amps < 0, widths > 1) keep_2 = np.logical_and(np.abs(amps) > 3*yerrs, amp_errs < 3*np.abs(amps)) keep = np.logical_and(keep_1, keep_2) return keep
python
def select(yerrs, amps, amp_errs, widths): """ criteria for keeping an object """ keep_1 = np.logical_and(amps < 0, widths > 1) keep_2 = np.logical_and(np.abs(amps) > 3*yerrs, amp_errs < 3*np.abs(amps)) keep = np.logical_and(keep_1, keep_2) return keep
[ "def", "select", "(", "yerrs", ",", "amps", ",", "amp_errs", ",", "widths", ")", ":", "keep_1", "=", "np", ".", "logical_and", "(", "amps", "<", "0", ",", "widths", ">", "1", ")", "keep_2", "=", "np", ".", "logical_and", "(", "np", ".", "abs", "(", "amps", ")", ">", "3", "*", "yerrs", ",", "amp_errs", "<", "3", "*", "np", ".", "abs", "(", "amps", ")", ")", "keep", "=", "np", ".", "logical_and", "(", "keep_1", ",", "keep_2", ")", "return", "keep" ]
criteria for keeping an object
[ "criteria", "for", "keeping", "an", "object" ]
train
https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/code/lamost/li_giants/residuals.py#L131-L136
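The mask keeps absorption-like fits only: negative amplitude, resolved width, an amplitude above 3 sigma, and an amplitude error under 3x the amplitude. A toy run, with select above in scope:
import numpy as np

yerrs    = np.array([0.01, 0.01, 0.01])
amps     = np.array([-0.05, -0.05, 0.02])  # third is emission: rejected
amp_errs = np.array([0.01, 0.20, 0.01])    # second is poorly constrained
widths   = np.array([2.0, 2.0, 2.0])

print(select(yerrs, amps, amp_errs, widths))  # [ True False False]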
annayqho/TheCannon
code/lamost/li_giants/residuals.py
run_all
def run_all(): """ Load the data that we're using to search for Li-rich giants. Store it in dataset and model objects. """ DATA_DIR = "/home/annaho/TheCannon/code/apogee_lamost/xcalib_4labels" dates = os.listdir("/home/share/LAMOST/DR2/DR2_release") dates = np.array(dates) dates = np.delete(dates, np.where(dates=='.directory')[0][0]) dates = np.delete(dates, np.where(dates=='all_folders.list')[0][0]) dates = np.delete(dates, np.where(dates=='dr2.lis')[0][0]) for date in dates: if glob.glob("*%s*.txt" %date): print("%s done" %date) else: print("running %s" %date) run_one_date(date)
python
def run_all(): """ Load the data that we're using to search for Li-rich giants. Store it in dataset and model objects. """ DATA_DIR = "/home/annaho/TheCannon/code/apogee_lamost/xcalib_4labels" dates = os.listdir("/home/share/LAMOST/DR2/DR2_release") dates = np.array(dates) dates = np.delete(dates, np.where(dates=='.directory')[0][0]) dates = np.delete(dates, np.where(dates=='all_folders.list')[0][0]) dates = np.delete(dates, np.where(dates=='dr2.lis')[0][0]) for date in dates: if glob.glob("*%s*.txt" %date): print("%s done" %date) else: print("running %s" %date) run_one_date(date)
[ "def", "run_all", "(", ")", ":", "DATA_DIR", "=", "\"/home/annaho/TheCannon/code/apogee_lamost/xcalib_4labels\"", "dates", "=", "os", ".", "listdir", "(", "\"/home/share/LAMOST/DR2/DR2_release\"", ")", "dates", "=", "np", ".", "array", "(", "dates", ")", "dates", "=", "np", ".", "delete", "(", "dates", ",", "np", ".", "where", "(", "dates", "==", "'.directory'", ")", "[", "0", "]", "[", "0", "]", ")", "dates", "=", "np", ".", "delete", "(", "dates", ",", "np", ".", "where", "(", "dates", "==", "'all_folders.list'", ")", "[", "0", "]", "[", "0", "]", ")", "dates", "=", "np", ".", "delete", "(", "dates", ",", "np", ".", "where", "(", "dates", "==", "'dr2.lis'", ")", "[", "0", "]", "[", "0", "]", ")", "for", "date", "in", "dates", ":", "if", "glob", ".", "glob", "(", "\"*%s*.txt\"", "%", "date", ")", ":", "print", "(", "\"%s done\"", "%", "date", ")", "else", ":", "print", "(", "\"running %s\"", "%", "date", ")", "run_one_date", "(", "date", ")" ]
Load the data that we're using to search for Li-rich giants. Store it in dataset and model objects.
[ "Load", "the", "data", "that", "we", "re", "using", "to", "search", "for", "Li", "-", "rich", "giants", ".", "Store", "it", "in", "dataset", "and", "model", "objects", "." ]
train
https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/code/lamost/li_giants/residuals.py#L212-L227
annayqho/TheCannon
code/lamost/mass_age/paper_plots/cn_features.py
gen_cannon_grad_spec
def gen_cannon_grad_spec(base_labels, choose, low, high, coeffs, pivots): """ Generate Cannon gradient spectra Parameters ---------- labels: default values for [teff, logg, feh, cfe, nfe, afe, ak] choose: val of cfe or nfe, whatever you're varying low: lowest val of cfe or nfe, whatever you're varying high: highest val of cfe or nfe, whatever you're varying """ # Generate Cannon gradient spectra low_lab = copy.copy(base_labels) low_lab[choose] = low lvec = (train_model._get_lvec(np.array([low_lab]), pivots))[0] model_low = np.dot(coeffs, lvec) high_lab = copy.copy(base_labels) high_lab[choose] = high lvec = (train_model._get_lvec(np.array([high_lab]), pivots))[0] model_high = np.dot(coeffs, lvec) grad_spec = (model_high - model_low) / (high - low) return grad_spec
python
def gen_cannon_grad_spec(base_labels, choose, low, high, coeffs, pivots): """ Generate Cannon gradient spectra Parameters ---------- labels: default values for [teff, logg, feh, cfe, nfe, afe, ak] choose: val of cfe or nfe, whatever you're varying low: lowest val of cfe or nfe, whatever you're varying high: highest val of cfe or nfe, whatever you're varying """ # Generate Cannon gradient spectra low_lab = copy.copy(base_labels) low_lab[choose] = low lvec = (train_model._get_lvec(np.array([low_lab]), pivots))[0] model_low = np.dot(coeffs, lvec) high_lab = copy.copy(base_labels) high_lab[choose] = high lvec = (train_model._get_lvec(np.array([high_lab]), pivots))[0] model_high = np.dot(coeffs, lvec) grad_spec = (model_high - model_low) / (high - low) return grad_spec
[ "def", "gen_cannon_grad_spec", "(", "base_labels", ",", "choose", ",", "low", ",", "high", ",", "coeffs", ",", "pivots", ")", ":", "# Generate Cannon gradient spectra", "low_lab", "=", "copy", ".", "copy", "(", "base_labels", ")", "low_lab", "[", "choose", "]", "=", "low", "lvec", "=", "(", "train_model", ".", "_get_lvec", "(", "np", ".", "array", "(", "[", "low_lab", "]", ")", ",", "pivots", ")", ")", "[", "0", "]", "model_low", "=", "np", ".", "dot", "(", "coeffs", ",", "lvec", ")", "high_lab", "=", "copy", ".", "copy", "(", "base_labels", ")", "high_lab", "[", "choose", "]", "=", "high", "lvec", "=", "(", "train_model", ".", "_get_lvec", "(", "np", ".", "array", "(", "[", "high_lab", "]", ")", ",", "pivots", ")", ")", "[", "0", "]", "model_high", "=", "np", ".", "dot", "(", "coeffs", ",", "lvec", ")", "grad_spec", "=", "(", "model_high", "-", "model_low", ")", "/", "(", "high", "-", "low", ")", "return", "grad_spec" ]
Generate Cannon gradient spectra Parameters ---------- labels: default values for [teff, logg, feh, cfe, nfe, afe, ak] choose: val of cfe or nfe, whatever you're varying low: lowest val of cfe or nfe, whatever you're varying high: highest val of cfe or nfe, whatever you're varying
[ "Generate", "Cannon", "gradient", "spectra" ]
train
https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/code/lamost/mass_age/paper_plots/cn_features.py#L55-L75
annayqho/TheCannon
code/lamost/mass_age/cn/write_table.py
get_err
def get_err(snr): """ Get approximate scatters from SNR as determined in the code, snr_test.py Order: Teff, logg, MH, CM, NM, alpha """ quad_terms = np.array( [3.11e-3, 1.10e-5, 6.95e-6, 5.05e-6, 4.65e-6, 4.10e-6]) lin_terms = np.array( [-0.869, -2.07e-3, -1.40e-3, -1.03e-3, -1.13e-3, -7.29e-4]) consts = np.array([104, 0.200, 0.117, 0.114, 0.156, 0.0624]) err = quad_terms[:,None] * snr**2 + lin_terms[:,None] * snr + consts[:,None] # find the minimum of the quadratic function min_snr = -lin_terms / (2*quad_terms) min_err = quad_terms * min_snr**2 + lin_terms * min_snr + consts mask = (snr[:,None] > min_snr).T for i in range(0,len(min_err)): err[i][mask[i]] = min_err[i] return err
python
def get_err(snr): """ Get approximate scatters from SNR as determined in the code, snr_test.py Order: Teff, logg, MH, CM, NM, alpha """ quad_terms = np.array( [3.11e-3, 1.10e-5, 6.95e-6, 5.05e-6, 4.65e-6, 4.10e-6]) lin_terms = np.array( [-0.869, -2.07e-3, -1.40e-3, -1.03e-3, -1.13e-3, -7.29e-4]) consts = np.array([104, 0.200, 0.117, 0.114, 0.156, 0.0624]) err = quad_terms[:,None] * snr**2 + lin_terms[:,None] * snr + consts[:,None] # find the minimum of the quadratic function min_snr = -lin_terms / (2*quad_terms) min_err = quad_terms * min_snr**2 + lin_terms * min_snr + consts mask = (snr[:,None] > min_snr).T for i in range(0,len(min_err)): err[i][mask[i]] = min_err[i] return err
[ "def", "get_err", "(", "snr", ")", ":", "quad_terms", "=", "np", ".", "array", "(", "[", "3.11e-3", ",", "1.10e-5", ",", "6.95e-6", ",", "5.05e-6", ",", "4.65e-6", ",", "4.10e-6", "]", ")", "lin_terms", "=", "np", ".", "array", "(", "[", "-", "0.869", ",", "-", "2.07e-3", ",", "-", "1.40e-3", ",", "-", "1.03e-3", ",", "-", "1.13e-3", ",", "-", "7.29e-4", "]", ")", "consts", "=", "np", ".", "array", "(", "[", "104", ",", "0.200", ",", "0.117", ",", "0.114", ",", "0.156", ",", "0.0624", "]", ")", "err", "=", "quad_terms", "[", ":", ",", "None", "]", "*", "snr", "**", "2", "+", "lin_terms", "[", ":", ",", "None", "]", "*", "snr", "+", "consts", "[", ":", ",", "None", "]", "# find the minimum of the quadratic function", "min_snr", "=", "-", "lin_terms", "/", "(", "2", "*", "quad_terms", ")", "min_err", "=", "quad_terms", "*", "min_snr", "**", "2", "+", "lin_terms", "*", "min_snr", "+", "consts", "mask", "=", "(", "snr", "[", ":", ",", "None", "]", ">", "min_snr", ")", ".", "T", "for", "i", "in", "range", "(", "0", ",", "len", "(", "min_err", ")", ")", ":", "err", "[", "i", "]", "[", "mask", "[", "i", "]", "]", "=", "min_err", "[", "i", "]", "return", "err" ]
Get approximate scatters from SNR as determined in the code, snr_test.py Order: Teff, logg, MH, CM, NM, alpha
[ "Get", "approximate", "scatters", "from", "SNR", "as", "determined", "in", "the", "code", "snr_test", ".", "py", "Order", ":", "Teff", "logg", "MH", "CM", "NM", "alpha" ]
train
https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/code/lamost/mass_age/cn/write_table.py#L12-L31
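get_err needs only numpy, so it can be exercised directly; each row is a downward quadratic in SNR that is clamped at its vertex, so the quoted error stops shrinking at high SNR:
import numpy as np

snr = np.array([20.0, 60.0, 100.0, 300.0])
err = get_err(snr)  # shape (6, 4): Teff, logg, MH, CM, NM, alpha rows
print(err[0])       # Teff error falls with SNR, then flattens at the vertex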
annayqho/TheCannon
code/lamost/mass_age/cn/get_colors.py
get_colors
def get_colors(catalog): """ Pull colors from catalog Parameters ---------- catalog: filename """ print("Get Colors") a = pyfits.open(catalog) data = a[1].data a.close() all_ids = data['LAMOST_ID_1'] all_ids = np.array([val.strip() for val in all_ids]) # G magnitude gmag = data['gpmag'] gmag_err = data['e_gpmag'] # R magnitude rmag = data['rpmag'] rmag_err = data['e_rpmag'] # I magnitude imag = data['ipmag'] imag_err = data['e_ipmag'] # W1 W1 = data['W1mag'] W1_err = data['e_W1mag'] # W1 W2 = data['W2mag'] W2_err = data['e_W2mag'] # J magnitude Jmag = data['Jmag'] Jmag_err = data['e_Jmag'] # H magnitude Hmag = data['Hmag'] Hmag_err = data['e_Hmag'] # K magnitude Kmag = data['Kmag'] Kmag_err = data['e_Kmag'] # Stack mag = np.vstack(( gmag, rmag, imag, Jmag, Hmag, Kmag, W2, W1)) # 8, nobj mag_err = np.vstack(( gmag_err, rmag_err, imag_err, Jmag_err, Hmag_err, Kmag_err, W2_err, W1_err)) # Make g-r, r-i, i-J, etc col = mag[:-1] - mag[1:] col_ivar = 1/(mag_err[:-1]**2 + mag_err[1:]**2) # There's something wrong with the i-band, I think..so the second color r-i #bad = col[:,1] < 0.0 #col_ivar[bad] = 0.0 return all_ids, col, col_ivar
python
def get_colors(catalog): """ Pull colors from catalog Parameters ---------- catalog: filename """ print("Get Colors") a = pyfits.open(catalog) data = a[1].data a.close() all_ids = data['LAMOST_ID_1'] all_ids = np.array([val.strip() for val in all_ids]) # G magnitude gmag = data['gpmag'] gmag_err = data['e_gpmag'] # R magnitude rmag = data['rpmag'] rmag_err = data['e_rpmag'] # I magnitude imag = data['ipmag'] imag_err = data['e_ipmag'] # W1 W1 = data['W1mag'] W1_err = data['e_W1mag'] # W1 W2 = data['W2mag'] W2_err = data['e_W2mag'] # J magnitude Jmag = data['Jmag'] Jmag_err = data['e_Jmag'] # H magnitude Hmag = data['Hmag'] Hmag_err = data['e_Hmag'] # K magnitude Kmag = data['Kmag'] Kmag_err = data['e_Kmag'] # Stack mag = np.vstack(( gmag, rmag, imag, Jmag, Hmag, Kmag, W2, W1)) # 8, nobj mag_err = np.vstack(( gmag_err, rmag_err, imag_err, Jmag_err, Hmag_err, Kmag_err, W2_err, W1_err)) # Make g-r, r-i, i-J, etc col = mag[:-1] - mag[1:] col_ivar = 1/(mag_err[:-1]**2 + mag_err[1:]**2) # There's something wrong with the i-band, I think..so the second color r-i #bad = col[:,1] < 0.0 #col_ivar[bad] = 0.0 return all_ids, col, col_ivar
[ "def", "get_colors", "(", "catalog", ")", ":", "print", "(", "\"Get Colors\"", ")", "a", "=", "pyfits", ".", "open", "(", "catalog", ")", "data", "=", "a", "[", "1", "]", ".", "data", "a", ".", "close", "(", ")", "all_ids", "=", "data", "[", "'LAMOST_ID_1'", "]", "all_ids", "=", "np", ".", "array", "(", "[", "val", ".", "strip", "(", ")", "for", "val", "in", "all_ids", "]", ")", "# G magnitude", "gmag", "=", "data", "[", "'gpmag'", "]", "gmag_err", "=", "data", "[", "'e_gpmag'", "]", "# R magnitude", "rmag", "=", "data", "[", "'rpmag'", "]", "rmag_err", "=", "data", "[", "'e_rpmag'", "]", "# I magnitude", "imag", "=", "data", "[", "'ipmag'", "]", "imag_err", "=", "data", "[", "'e_ipmag'", "]", "# W1", "W1", "=", "data", "[", "'W1mag'", "]", "W1_err", "=", "data", "[", "'e_W1mag'", "]", "# W1", "W2", "=", "data", "[", "'W2mag'", "]", "W2_err", "=", "data", "[", "'e_W2mag'", "]", "# J magnitude", "Jmag", "=", "data", "[", "'Jmag'", "]", "Jmag_err", "=", "data", "[", "'e_Jmag'", "]", "# H magnitude", "Hmag", "=", "data", "[", "'Hmag'", "]", "Hmag_err", "=", "data", "[", "'e_Hmag'", "]", "# K magnitude", "Kmag", "=", "data", "[", "'Kmag'", "]", "Kmag_err", "=", "data", "[", "'e_Kmag'", "]", "# Stack", "mag", "=", "np", ".", "vstack", "(", "(", "gmag", ",", "rmag", ",", "imag", ",", "Jmag", ",", "Hmag", ",", "Kmag", ",", "W2", ",", "W1", ")", ")", "# 8, nobj", "mag_err", "=", "np", ".", "vstack", "(", "(", "gmag_err", ",", "rmag_err", ",", "imag_err", ",", "Jmag_err", ",", "Hmag_err", ",", "Kmag_err", ",", "W2_err", ",", "W1_err", ")", ")", "# Make g-r, r-i, i-J, etc", "col", "=", "mag", "[", ":", "-", "1", "]", "-", "mag", "[", "1", ":", "]", "col_ivar", "=", "1", "/", "(", "mag_err", "[", ":", "-", "1", "]", "**", "2", "+", "mag_err", "[", "1", ":", "]", "**", "2", ")", "# There's something wrong with the i-band, I think..so the second color r-i", "#bad = col[:,1] < 0.0", "#col_ivar[bad] = 0.0", "return", "all_ids", ",", "col", ",", "col_ivar" ]
Pull colors from catalog Parameters ---------- catalog: filename
[ "Pull", "colors", "from", "catalog" ]
train
https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/code/lamost/mass_age/cn/get_colors.py#L4-L56
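The color construction at the end is adjacent-band differences down the magnitude stack, with the two bands' errors added in quadrature for the inverse variance; a toy of that slice arithmetic:
import numpy as np

# Toy stack: 4 bands x 2 objects (think g, r, i, J).
mag = np.array([[15.0, 14.0],
                [14.5, 13.2],
                [14.2, 12.9],
                [13.9, 12.5]])
mag_err = np.full_like(mag, 0.05)

col = mag[:-1] - mag[1:]                           # g-r, r-i, i-J
col_ivar = 1 / (mag_err[:-1]**2 + mag_err[1:]**2)  # quadrature sum
print(col.shape)       # (3, 2)
print(col_ivar[0, 0])  # 1 / (0.05**2 + 0.05**2) = 200.0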
annayqho/TheCannon
spectral_model.py
draw_spectra
def draw_spectra(md, ds):
    """ Generate best-fit spectra for all the test objects

    Parameters
    ----------
    md: model
        The Cannon spectral model

    ds: Dataset
        Dataset object

    Returns
    -------
    best_fluxes: ndarray
        The best-fit test fluxes

    best_ivars:
        The best-fit test inverse variances
    """
    coeffs_all, covs, scatters, red_chisqs, pivots, label_vector = md.model
    nstars = len(ds.test_SNR)
    cannon_flux = np.zeros(ds.test_flux.shape)
    cannon_ivar = np.zeros(ds.test_ivar.shape)
    for i in range(nstars):
        x = label_vector[:,i,:]
        spec_fit = np.einsum('ij, ij->i', x, coeffs_all)
        cannon_flux[i,:] = spec_fit
        bad = ds.test_ivar[i,:] == SMALL**2
        cannon_ivar[i,:][~bad] = 1. / scatters[~bad] ** 2
    return cannon_flux, cannon_ivar
python
def draw_spectra(md, ds):
    """ Generate best-fit spectra for all the test objects

    Parameters
    ----------
    md: model
        The Cannon spectral model

    ds: Dataset
        Dataset object

    Returns
    -------
    best_fluxes: ndarray
        The best-fit test fluxes

    best_ivars:
        The best-fit test inverse variances
    """
    coeffs_all, covs, scatters, red_chisqs, pivots, label_vector = md.model
    nstars = len(ds.test_SNR)
    cannon_flux = np.zeros(ds.test_flux.shape)
    cannon_ivar = np.zeros(ds.test_ivar.shape)
    for i in range(nstars):
        x = label_vector[:,i,:]
        spec_fit = np.einsum('ij, ij->i', x, coeffs_all)
        cannon_flux[i,:] = spec_fit
        bad = ds.test_ivar[i,:] == SMALL**2
        cannon_ivar[i,:][~bad] = 1. / scatters[~bad] ** 2
    return cannon_flux, cannon_ivar
[ "def", "draw_spectra", "(", "md", ",", "ds", ")", ":", "coeffs_all", ",", "covs", ",", "scatters", ",", "red_chisqs", ",", "pivots", ",", "label_vector", "=", "model", ".", "model", "nstars", "=", "len", "(", "dataset", ".", "test_SNR", ")", "cannon_flux", "=", "np", ".", "zeros", "(", "dataset", ".", "test_flux", ".", "shape", ")", "cannon_ivar", "=", "np", ".", "zeros", "(", "dataset", ".", "test_ivar", ".", "shape", ")", "for", "i", "in", "range", "(", "nstars", ")", ":", "x", "=", "label_vector", "[", ":", ",", "i", ",", ":", "]", "spec_fit", "=", "np", ".", "einsum", "(", "'ij, ij->i'", ",", "x", ",", "coeffs_all", ")", "cannon_flux", "[", "i", ",", ":", "]", "=", "spec_fit", "bad", "=", "dataset", ".", "test_ivar", "[", "i", ",", ":", "]", "==", "SMALL", "**", "2", "cannon_ivar", "[", "i", ",", ":", "]", "[", "~", "bad", "]", "=", "1.", "/", "scatters", "[", "~", "bad", "]", "**", "2", "return", "cannon_flux", ",", "cannon_ivar" ]
Generate best-fit spectra for all the test objects Parameters ---------- md: model The Cannon spectral model ds: Dataset Dataset object Returns ------- best_fluxes: ndarray The best-fit test fluxes best_ivars: The best-fit test inverse variances
[ "Generate", "best", "-", "fit", "spectra", "for", "all", "the", "test", "objects" ]
train
https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/spectral_model.py#L3-L32
annayqho/TheCannon
spectral_model.py
overlay_spectra
def overlay_spectra(model, dataset):
    """ Run a series of diagnostics on the fitted spectra

    Parameters
    ----------
    model: model
        best-fit Cannon spectral model

    dataset: Dataset
        original spectra
    """
    best_flux, best_ivar = draw_spectra(model, dataset)
    coeffs_all, covs, scatters, all_chisqs, pivots, label_vector = model.model

    # Overplot original spectra with best-fit spectra
    print("Overplotting spectra for ten random stars")
    res = dataset.test_flux-best_flux
    lambdas = dataset.wl
    npix = len(lambdas)
    nstars = best_flux.shape[0]
    pickstars = []
    for i in range(10):
        pickstars.append(random.randrange(0, nstars-1))
    for i in pickstars:
        print("Star %s" % i)
        ID = dataset.test_ID[i]
        spec_orig = dataset.test_flux[i,:]
        bad = dataset.test_flux[i,:] == 0
        lambdas = np.ma.array(lambdas, mask=bad, dtype=float)
        npix = len(lambdas.compressed())
        spec_orig = np.ma.array(dataset.test_flux[i,:], mask=bad)
        spec_fit = np.ma.array(best_flux[i,:], mask=bad)
        ivars_orig = np.ma.array(dataset.test_ivar[i,:], mask=bad)
        ivars_fit = np.ma.array(best_ivar[i,:], mask=bad)
        red_chisq = np.sum(all_chisqs[:,i], axis=0) / (npix - coeffs_all.shape[1])
        red_chisq = np.round(red_chisq, 2)
        fig,axarr = plt.subplots(2)
        ax1 = axarr[0]
        im = ax1.scatter(lambdas, spec_orig, label="Orig Spec",
                         c=1 / np.sqrt(ivars_orig), s=10)
        ax1.scatter(lambdas, spec_fit, label="Cannon Spec", c='r', s=10)
        ax1.errorbar(lambdas, spec_fit,
                     yerr=1/np.sqrt(ivars_fit), fmt='ro', ms=1, alpha=0.7)
        ax1.set_xlabel(r"Wavelength $\lambda (\AA)$")
        ax1.set_ylabel("Normalized flux")
        ax1.set_title("Spectrum Fit: %s" % ID)
        ax1.set_xlim(min(lambdas.compressed())-10, max(lambdas.compressed())+10)
        ax1.legend(loc='lower center', fancybox=True, shadow=True)
        ax2 = axarr[1]
        ax2.scatter(spec_orig, spec_fit, c=1/np.sqrt(ivars_orig), alpha=0.7)
        ax2.errorbar(spec_orig, spec_fit, yerr=1 / np.sqrt(ivars_fit),
                     ecolor='k', fmt="none", ms=1, alpha=0.7)
        #fig.subplots_adjust(right=0.8)
        #cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
        fig.colorbar(im, ax=ax2)
        #fig.colorbar(
        #    im, cax=cbar_ax,
        #    label="Uncertainties on the Fluxes from the Original Spectrum")
        xlims = ax2.get_xlim()
        ylims = ax2.get_ylim()
        lims = [np.min([xlims, ylims]), np.max([xlims, ylims])]
        ax2.plot(lims, lims, 'k-', alpha=0.75)
        textstr = "Red Chi Sq: %s" % red_chisq
        props = dict(boxstyle='round', facecolor='palevioletred', alpha=0.5)
        ax2.text(0.05, 0.95, textstr, transform=ax2.transAxes, fontsize=14,
                 verticalalignment='top', bbox=props)
        ax2.set_xlim(xlims)
        ax2.set_ylim(ylims)
        ax2.set_xlabel("Orig Fluxes")
        ax2.set_ylabel("Fitted Fluxes")
        plt.tight_layout()
        filename = "best_fit_spec_Star%s.png" % i
        print("Saved as %s" % filename)
        fig.savefig(filename)
        plt.close(fig)
python
def overlay_spectra(model, dataset):
    """ Run a series of diagnostics on the fitted spectra

    Parameters
    ----------
    model: model
        best-fit Cannon spectral model

    dataset: Dataset
        original spectra
    """
    best_flux, best_ivar = draw_spectra(model, dataset)
    coeffs_all, covs, scatters, all_chisqs, pivots, label_vector = model.model

    # Overplot original spectra with best-fit spectra
    print("Overplotting spectra for ten random stars")
    res = dataset.test_flux-best_flux
    lambdas = dataset.wl
    npix = len(lambdas)
    nstars = best_flux.shape[0]
    pickstars = []
    for i in range(10):
        pickstars.append(random.randrange(0, nstars-1))
    for i in pickstars:
        print("Star %s" % i)
        ID = dataset.test_ID[i]
        spec_orig = dataset.test_flux[i,:]
        bad = dataset.test_flux[i,:] == 0
        lambdas = np.ma.array(lambdas, mask=bad, dtype=float)
        npix = len(lambdas.compressed())
        spec_orig = np.ma.array(dataset.test_flux[i,:], mask=bad)
        spec_fit = np.ma.array(best_flux[i,:], mask=bad)
        ivars_orig = np.ma.array(dataset.test_ivar[i,:], mask=bad)
        ivars_fit = np.ma.array(best_ivar[i,:], mask=bad)
        red_chisq = np.sum(all_chisqs[:,i], axis=0) / (npix - coeffs_all.shape[1])
        red_chisq = np.round(red_chisq, 2)
        fig,axarr = plt.subplots(2)
        ax1 = axarr[0]
        im = ax1.scatter(lambdas, spec_orig, label="Orig Spec",
                         c=1 / np.sqrt(ivars_orig), s=10)
        ax1.scatter(lambdas, spec_fit, label="Cannon Spec", c='r', s=10)
        ax1.errorbar(lambdas, spec_fit,
                     yerr=1/np.sqrt(ivars_fit), fmt='ro', ms=1, alpha=0.7)
        ax1.set_xlabel(r"Wavelength $\lambda (\AA)$")
        ax1.set_ylabel("Normalized flux")
        ax1.set_title("Spectrum Fit: %s" % ID)
        ax1.set_xlim(min(lambdas.compressed())-10, max(lambdas.compressed())+10)
        ax1.legend(loc='lower center', fancybox=True, shadow=True)
        ax2 = axarr[1]
        ax2.scatter(spec_orig, spec_fit, c=1/np.sqrt(ivars_orig), alpha=0.7)
        ax2.errorbar(spec_orig, spec_fit, yerr=1 / np.sqrt(ivars_fit),
                     ecolor='k', fmt="none", ms=1, alpha=0.7)
        #fig.subplots_adjust(right=0.8)
        #cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
        fig.colorbar(im, ax=ax2)
        #fig.colorbar(
        #    im, cax=cbar_ax,
        #    label="Uncertainties on the Fluxes from the Original Spectrum")
        xlims = ax2.get_xlim()
        ylims = ax2.get_ylim()
        lims = [np.min([xlims, ylims]), np.max([xlims, ylims])]
        ax2.plot(lims, lims, 'k-', alpha=0.75)
        textstr = "Red Chi Sq: %s" % red_chisq
        props = dict(boxstyle='round', facecolor='palevioletred', alpha=0.5)
        ax2.text(0.05, 0.95, textstr, transform=ax2.transAxes, fontsize=14,
                 verticalalignment='top', bbox=props)
        ax2.set_xlim(xlims)
        ax2.set_ylim(ylims)
        ax2.set_xlabel("Orig Fluxes")
        ax2.set_ylabel("Fitted Fluxes")
        plt.tight_layout()
        filename = "best_fit_spec_Star%s.png" % i
        print("Saved as %s" % filename)
        fig.savefig(filename)
        plt.close(fig)
[ "def", "overlay_spectra", "(", "model", ",", "dataset", ")", ":", "best_flux", ",", "best_ivar", "=", "draw_spectra", "(", "model", ",", "dataset", ")", "coeffs_all", ",", "covs", ",", "scatters", ",", "all_chisqs", ",", "pivots", ",", "label_vector", "=", "model", ".", "model", "# Overplot original spectra with best-fit spectra", "print", "(", "\"Overplotting spectra for ten random stars\"", ")", "res", "=", "dataset", ".", "test_flux", "-", "best_flux", "lambdas", "=", "dataset", ".", "wl", "npix", "=", "len", "(", "lambdas", ")", "nstars", "=", "best_flux", ".", "shape", "[", "0", "]", "pickstars", "=", "[", "]", "for", "i", "in", "range", "(", "10", ")", ":", "pickstars", ".", "append", "(", "random", ".", "randrange", "(", "0", ",", "nstars", "-", "1", ")", ")", "for", "i", "in", "pickstars", ":", "print", "(", "\"Star %s\"", "%", "i", ")", "ID", "=", "dataset", ".", "test_ID", "[", "i", "]", "spec_orig", "=", "dataset", ".", "test_flux", "[", "i", ",", ":", "]", "bad", "=", "dataset", ".", "test_flux", "[", "i", ",", ":", "]", "==", "0", "lambdas", "=", "np", ".", "ma", ".", "array", "(", "lambdas", ",", "mask", "=", "bad", ",", "dtype", "=", "float", ")", "npix", "=", "len", "(", "lambdas", ".", "compressed", "(", ")", ")", "spec_orig", "=", "np", ".", "ma", ".", "array", "(", "dataset", ".", "test_flux", "[", "i", ",", ":", "]", ",", "mask", "=", "bad", ")", "spec_fit", "=", "np", ".", "ma", ".", "array", "(", "best_flux", "[", "i", ",", ":", "]", ",", "mask", "=", "bad", ")", "ivars_orig", "=", "np", ".", "ma", ".", "array", "(", "dataset", ".", "test_ivar", "[", "i", ",", ":", "]", ",", "mask", "=", "bad", ")", "ivars_fit", "=", "np", ".", "ma", ".", "array", "(", "best_ivar", "[", "i", ",", ":", "]", ",", "mask", "=", "bad", ")", "red_chisq", "=", "np", ".", "sum", "(", "all_chisqs", "[", ":", ",", "i", "]", ",", "axis", "=", "0", ")", "/", "(", "npix", "-", "coeffs_all", ".", "shape", "[", "1", "]", ")", "red_chisq", "=", "np", ".", "round", "(", "red_chisq", ",", "2", ")", "fig", ",", "axarr", "=", "plt", ".", "subplots", "(", "2", ")", "ax1", "=", "axarr", "[", "0", "]", "im", "=", "ax1", ".", "scatter", "(", "lambdas", ",", "spec_orig", ",", "label", "=", "\"Orig Spec\"", ",", "c", "=", "1", "/", "np", ".", "sqrt", "(", "ivars_orig", ")", ",", "s", "=", "10", ")", "ax1", ".", "scatter", "(", "lambdas", ",", "spec_fit", ",", "label", "=", "\"Cannon Spec\"", ",", "c", "=", "'r'", ",", "s", "=", "10", ")", "ax1", ".", "errorbar", "(", "lambdas", ",", "spec_fit", ",", "yerr", "=", "1", "/", "np", ".", "sqrt", "(", "ivars_fit", ")", ",", "fmt", "=", "'ro'", ",", "ms", "=", "1", ",", "alpha", "=", "0.7", ")", "ax1", ".", "set_xlabel", "(", "r\"Wavelength $\\lambda (\\AA)$\"", ")", "ax1", ".", "set_ylabel", "(", "\"Normalized flux\"", ")", "ax1", ".", "set_title", "(", "\"Spectrum Fit: %s\"", "%", "ID", ")", "ax1", ".", "set_title", "(", "\"Spectrum Fit\"", ")", "ax1", ".", "set_xlim", "(", "min", "(", "lambdas", ".", "compressed", "(", ")", ")", "-", "10", ",", "max", "(", "lambdas", ".", "compressed", "(", ")", ")", "+", "10", ")", "ax1", ".", "legend", "(", "loc", "=", "'lower center'", ",", "fancybox", "=", "True", ",", "shadow", "=", "True", ")", "ax2", "=", "axarr", "[", "1", "]", "ax2", ".", "scatter", "(", "spec_orig", ",", "spec_fit", ",", "c", "=", "1", "/", "np", ".", "sqrt", "(", "ivars_orig", ")", ",", "alpha", "=", "0.7", ")", "ax2", ".", "errorbar", "(", "spec_orig", ",", "spec_fit", ",", "yerr", "=", "1", "/", "np", ".", "sqrt", "(", "ivars_fit", ")", ",", 
"ecolor", "=", "'k'", ",", "fmt", "=", "\"none\"", ",", "ms", "=", "1", ",", "alpha", "=", "0.7", ")", "#fig.subplots_adjust(right=0.8)", "#cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])", "fig", ".", "colorbar", "(", ")", "#fig.colorbar(", "# im, cax=cbar_ax,", "# label=\"Uncertainties on the Fluxes from the Original Spectrum\")", "xlims", "=", "ax2", ".", "get_xlim", "(", ")", "ylims", "=", "ax2", ".", "get_ylim", "(", ")", "lims", "=", "[", "np", ".", "min", "(", "[", "xlims", ",", "ylims", "]", ")", ",", "np", ".", "max", "(", "[", "xlims", ",", "ylims", "]", ")", "]", "ax2", ".", "plot", "(", "lims", ",", "lims", ",", "'k-'", ",", "alpha", "=", "0.75", ")", "textstr", "=", "\"Red Chi Sq: %s\"", "%", "red_chisq", "props", "=", "dict", "(", "boxstyle", "=", "'round'", ",", "facecolor", "=", "'palevioletred'", ",", "alpha", "=", "0.5", ")", "ax2", ".", "text", "(", "0.05", ",", "0.95", ",", "textstr", ",", "transform", "=", "ax2", ".", "transAxes", ",", "fontsize", "=", "14", ",", "verticalalignment", "=", "'top'", ",", "bbox", "=", "props", ")", "ax2", ".", "set_xlim", "(", "xlims", ")", "ax2", ".", "set_ylim", "(", "ylims", ")", "ax2", ".", "set_xlabel", "(", "\"Orig Fluxes\"", ")", "ax2", ".", "set_ylabel", "(", "\"Fitted Fluxes\"", ")", "plt", ".", "tight_layout", "(", ")", "filename", "=", "\"best_fit_spec_Star%s.png\"", "%", "i", "print", "(", "\"Saved as %s\"", "%", "filename", ")", "fig", ".", "savefig", "(", "filename", ")", "plt", ".", "close", "(", "fig", ")" ]
Run a series of diagnostics on the fitted spectra Parameters ---------- model: model best-fit Cannon spectral model dataset: Dataset original spectra
[ "Run", "a", "series", "of", "diagnostics", "on", "the", "fitted", "spectra" ]
train
https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/spectral_model.py#L35-L111
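A quick way to sanity-check the masking and reduced chi-squared logic above without a trained model: build toy arrays, mask the zero-flux pixels, and normalize the summed chi-squared by the surviving pixel count minus the number of model coefficients. Everything here (the array values, n_coeffs) is made up for illustration; only the masking pattern follows the function above.

import numpy as np

# Toy stand-ins: one star, six pixels; zeros mark dead pixels
flux = np.array([1.02, 0.0, 0.98, 1.01, 0.0, 0.97])
chisq_per_pix = np.array([1.1, 0.0, 0.8, 1.3, 0.0, 0.9])
n_coeffs = 2  # stand-in for coeffs_all.shape[1]

bad = flux == 0
masked_flux = np.ma.array(flux, mask=bad)
npix = len(masked_flux.compressed())  # pixels surviving the mask

# reduced chi-squared over the effective degrees of freedom
red_chisq = np.sum(chisq_per_pix[~bad]) / (npix - n_coeffs)
print(np.round(red_chisq, 2))  # 2.05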
annayqho/TheCannon
spectral_model.py
residuals
def residuals(cannon_set, dataset):
    """ Stack spectrum fit residuals, sort by each label.
    Include histogram of the RMS at each pixel.

    Parameters
    ----------
    cannon_set: Dataset
        best-fit Cannon spectra
    dataset: Dataset
        original spectra
    """
    print("Stacking spectrum fit residuals")
    res = dataset.test_fluxes - cannon_set.test_fluxes
    bad = dataset.test_ivars == SMALL**2
    # quadrature sum of the two flux uncertainties
    err = np.sqrt(1. / dataset.test_ivars + 1. / cannon_set.test_ivars)
    res_norm = res / err
    # mask out pixels with zero scatter across stars, then drop them
    res_norm = np.ma.array(res_norm,
                           mask=(np.ones_like(res_norm) *
                                 (np.std(res_norm, axis=0) == 0)))
    res_norm = np.ma.compress_cols(res_norm)

    for i in range(len(cannon_set.get_plotting_labels())):
        label_name = cannon_set.get_plotting_labels()[i]
        print("Plotting residuals sorted by %s" % label_name)
        label_vals = cannon_set.tr_label_vals[:,i]
        sorted_res = res_norm[np.argsort(label_vals)]
        mu = np.mean(sorted_res.flatten())
        sigma = np.std(sorted_res.flatten())
        left, width = 0.1, 0.65
        bottom, height = 0.1, 0.65
        bottom_h = left_h = left + width + 0.1
        rect_scatter = [left, bottom, width, height]
        rect_histx = [left, bottom_h, width, 0.1]
        rect_histy = [left_h, bottom, 0.1, height]
        plt.figure()
        axScatter = plt.axes(rect_scatter)
        axHistx = plt.axes(rect_histx)
        axHisty = plt.axes(rect_histy)
        im = axScatter.imshow(sorted_res, cmap=plt.cm.bwr_r,
                              interpolation="nearest",
                              vmin=mu - 3. * sigma, vmax=mu + 3. * sigma,
                              aspect='auto', origin='lower',
                              extent=[0, len(dataset.wl),
                                      min(label_vals), max(label_vals)])
        cax, kw = colorbar.make_axes(axScatter.axes, location='bottom')
        plt.colorbar(im, cax=cax, orientation='horizontal')
        axScatter.set_title(
            r"Spectral Residuals Sorted by ${0:s}$".format(label_name))
        axScatter.set_xlabel("Pixels")
        axScatter.set_ylabel(r"$%s$" % label_name)
        axHisty.hist(np.std(res_norm, axis=1)[~np.isnan(np.std(res_norm, axis=1))],
                     orientation='horizontal', range=[0, 2])
        axHisty.axhline(y=1, c='k', linewidth=3, label="y=1")
        axHisty.legend(bbox_to_anchor=(0., 0.8, 1., .102),
                       prop={'family':'serif', 'size':'small'})
        axHisty.text(1.0, 0.5, "Distribution of Stdev of Star Residuals",
                     verticalalignment='center', transform=axHisty.transAxes,
                     rotation=270)
        axHisty.set_ylabel("Standard Deviation")
        start, end = axHisty.get_xlim()
        axHisty.xaxis.set_ticks(np.linspace(start, end, 3))
        axHisty.set_xlabel("Number of Stars")
        axHisty.xaxis.set_label_position("top")
        axHistx.hist(np.std(res_norm, axis=0)[~np.isnan(np.std(res_norm, axis=0))],
                     range=[0.8, 1.1])
        axHistx.axvline(x=1, c='k', linewidth=3, label="x=1")
        axHistx.set_title("Distribution of Stdev of Pixel Residuals")
        axHistx.set_xlabel("Standard Deviation")
        axHistx.set_ylabel("Number of Pixels")
        start, end = axHistx.get_ylim()
        axHistx.yaxis.set_ticks(np.linspace(start, end, 3))
        axHistx.legend()
        filename = "residuals_sorted_by_label_%s.png" % i
        plt.savefig(filename)
        print("File saved as %s" % filename)
        plt.close()

    # Auto-correlation of mean residuals
    print("Plotting Auto-Correlation of Mean Residuals")
    mean_res = res_norm.mean(axis=0)
    autocorr = np.correlate(mean_res, mean_res, mode="full")
    pkwidth = int(len(autocorr)/2 - np.argmin(autocorr))
    xmin = int(len(autocorr)/2) - pkwidth
    xmax = int(len(autocorr)/2) + pkwidth
    zoom_x = np.linspace(xmin, xmax, len(autocorr[xmin:xmax]))
    fig, axarr = plt.subplots(2)
    axarr[0].plot(autocorr)
    axarr[0].set_title("Autocorrelation of Mean Spectral Residual")
    axarr[0].set_xlabel("Lag (# Pixels)")
    axarr[0].set_ylabel("Autocorrelation")
    axarr[1].plot(zoom_x, autocorr[xmin:xmax])
    axarr[1].set_title("Central Peak, Zoomed")
    axarr[1].set_xlabel("Lag (# Pixels)")
    axarr[1].set_ylabel("Autocorrelation")
    filename = "residuals_autocorr.png"
    plt.savefig(filename)
    print("saved %s" % filename)
    plt.close()
python
def residuals(cannon_set, dataset):
    """ Stack spectrum fit residuals, sort by each label.
    Include histogram of the RMS at each pixel.

    Parameters
    ----------
    cannon_set: Dataset
        best-fit Cannon spectra
    dataset: Dataset
        original spectra
    """
    print("Stacking spectrum fit residuals")
    res = dataset.test_fluxes - cannon_set.test_fluxes
    bad = dataset.test_ivars == SMALL**2
    # quadrature sum of the two flux uncertainties
    err = np.sqrt(1. / dataset.test_ivars + 1. / cannon_set.test_ivars)
    res_norm = res / err
    # mask out pixels with zero scatter across stars, then drop them
    res_norm = np.ma.array(res_norm,
                           mask=(np.ones_like(res_norm) *
                                 (np.std(res_norm, axis=0) == 0)))
    res_norm = np.ma.compress_cols(res_norm)

    for i in range(len(cannon_set.get_plotting_labels())):
        label_name = cannon_set.get_plotting_labels()[i]
        print("Plotting residuals sorted by %s" % label_name)
        label_vals = cannon_set.tr_label_vals[:,i]
        sorted_res = res_norm[np.argsort(label_vals)]
        mu = np.mean(sorted_res.flatten())
        sigma = np.std(sorted_res.flatten())
        left, width = 0.1, 0.65
        bottom, height = 0.1, 0.65
        bottom_h = left_h = left + width + 0.1
        rect_scatter = [left, bottom, width, height]
        rect_histx = [left, bottom_h, width, 0.1]
        rect_histy = [left_h, bottom, 0.1, height]
        plt.figure()
        axScatter = plt.axes(rect_scatter)
        axHistx = plt.axes(rect_histx)
        axHisty = plt.axes(rect_histy)
        im = axScatter.imshow(sorted_res, cmap=plt.cm.bwr_r,
                              interpolation="nearest",
                              vmin=mu - 3. * sigma, vmax=mu + 3. * sigma,
                              aspect='auto', origin='lower',
                              extent=[0, len(dataset.wl),
                                      min(label_vals), max(label_vals)])
        cax, kw = colorbar.make_axes(axScatter.axes, location='bottom')
        plt.colorbar(im, cax=cax, orientation='horizontal')
        axScatter.set_title(
            r"Spectral Residuals Sorted by ${0:s}$".format(label_name))
        axScatter.set_xlabel("Pixels")
        axScatter.set_ylabel(r"$%s$" % label_name)
        axHisty.hist(np.std(res_norm, axis=1)[~np.isnan(np.std(res_norm, axis=1))],
                     orientation='horizontal', range=[0, 2])
        axHisty.axhline(y=1, c='k', linewidth=3, label="y=1")
        axHisty.legend(bbox_to_anchor=(0., 0.8, 1., .102),
                       prop={'family':'serif', 'size':'small'})
        axHisty.text(1.0, 0.5, "Distribution of Stdev of Star Residuals",
                     verticalalignment='center', transform=axHisty.transAxes,
                     rotation=270)
        axHisty.set_ylabel("Standard Deviation")
        start, end = axHisty.get_xlim()
        axHisty.xaxis.set_ticks(np.linspace(start, end, 3))
        axHisty.set_xlabel("Number of Stars")
        axHisty.xaxis.set_label_position("top")
        axHistx.hist(np.std(res_norm, axis=0)[~np.isnan(np.std(res_norm, axis=0))],
                     range=[0.8, 1.1])
        axHistx.axvline(x=1, c='k', linewidth=3, label="x=1")
        axHistx.set_title("Distribution of Stdev of Pixel Residuals")
        axHistx.set_xlabel("Standard Deviation")
        axHistx.set_ylabel("Number of Pixels")
        start, end = axHistx.get_ylim()
        axHistx.yaxis.set_ticks(np.linspace(start, end, 3))
        axHistx.legend()
        filename = "residuals_sorted_by_label_%s.png" % i
        plt.savefig(filename)
        print("File saved as %s" % filename)
        plt.close()

    # Auto-correlation of mean residuals
    print("Plotting Auto-Correlation of Mean Residuals")
    mean_res = res_norm.mean(axis=0)
    autocorr = np.correlate(mean_res, mean_res, mode="full")
    pkwidth = int(len(autocorr)/2 - np.argmin(autocorr))
    xmin = int(len(autocorr)/2) - pkwidth
    xmax = int(len(autocorr)/2) + pkwidth
    zoom_x = np.linspace(xmin, xmax, len(autocorr[xmin:xmax]))
    fig, axarr = plt.subplots(2)
    axarr[0].plot(autocorr)
    axarr[0].set_title("Autocorrelation of Mean Spectral Residual")
    axarr[0].set_xlabel("Lag (# Pixels)")
    axarr[0].set_ylabel("Autocorrelation")
    axarr[1].plot(zoom_x, autocorr[xmin:xmax])
    axarr[1].set_title("Central Peak, Zoomed")
    axarr[1].set_xlabel("Lag (# Pixels)")
    axarr[1].set_ylabel("Autocorrelation")
    filename = "residuals_autocorr.png"
    plt.savefig(filename)
    print("saved %s" % filename)
    plt.close()
[ "def", "residuals", "(", "cannon_set", ",", "dataset", ")", ":", "print", "(", "\"Stacking spectrum fit residuals\"", ")", "res", "=", "dataset", ".", "test_fluxes", "-", "cannon_set", ".", "test_fluxes", "bad", "=", "dataset", ".", "test_ivars", "==", "SMALL", "**", "2", "err", "=", "np", ".", "zeros", "(", "len", "(", "dataset", ".", "test_ivars", ")", ")", "err", "=", "np", ".", "sqrt", "(", "1.", "/", "dataset", ".", "test_ivars", "+", "1.", "/", "cannon_set", ".", "test_ivars", ")", "res_norm", "=", "res", "/", "err", "res_norm", "=", "np", ".", "ma", ".", "array", "(", "res_norm", ",", "mask", "=", "(", "np", ".", "ones_like", "(", "res_norm", ")", "*", "(", "np", ".", "std", "(", "res_norm", ",", "axis", "=", "0", ")", "==", "0", ")", ")", ")", "res_norm", "=", "np", ".", "ma", ".", "compress_cols", "(", "res_norm", ")", "for", "i", "in", "range", "(", "len", "(", "cannon_set", ".", "get_plotting_labels", "(", ")", ")", ")", ":", "label_name", "=", "cannon_set", ".", "get_plotting_labels", "(", ")", "[", "i", "]", "print", "(", "\"Plotting residuals sorted by %s\"", "%", "label_name", ")", "label_vals", "=", "cannon_set", ".", "tr_label_vals", "[", ":", ",", "i", "]", "sorted_res", "=", "res_norm", "[", "np", ".", "argsort", "(", "label_vals", ")", "]", "mu", "=", "np", ".", "mean", "(", "sorted_res", ".", "flatten", "(", ")", ")", "sigma", "=", "np", ".", "std", "(", "sorted_res", ".", "flatten", "(", ")", ")", "left", ",", "width", "=", "0.1", ",", "0.65", "bottom", ",", "height", "=", "0.1", ",", "0.65", "bottom_h", "=", "left_h", "=", "left", "+", "width", "+", "0.1", "rect_scatter", "=", "[", "left", ",", "bottom", ",", "width", ",", "height", "]", "rect_histx", "=", "[", "left", ",", "bottom_h", ",", "width", ",", "0.1", "]", "rect_histy", "=", "[", "left_h", ",", "bottom", ",", "0.1", ",", "height", "]", "plt", ".", "figure", "(", ")", "axScatter", "=", "plt", ".", "axes", "(", "rect_scatter", ")", "axHistx", "=", "plt", ".", "axes", "(", "rect_histx", ")", "axHisty", "=", "plt", ".", "axes", "(", "rect_histy", ")", "im", "=", "axScatter", ".", "imshow", "(", "sorted_res", ",", "cmap", "=", "plt", ".", "cm", ".", "bwr_r", ",", "interpolation", "=", "\"nearest\"", ",", "vmin", "=", "mu", "-", "3.", "*", "sigma", ",", "vmax", "=", "mu", "+", "3.", "*", "sigma", ",", "aspect", "=", "'auto'", ",", "origin", "=", "'lower'", ",", "extent", "=", "[", "0", ",", "len", "(", "dataset", ".", "wl", ")", ",", "min", "(", "label_vals", ")", ",", "max", "(", "label_vals", ")", "]", ")", "cax", ",", "kw", "=", "colorbar", ".", "make_axes", "(", "axScatter", ".", "axes", ",", "location", "=", "'bottom'", ")", "plt", ".", "colorbar", "(", "im", ",", "cax", "=", "cax", ",", "orientation", "=", "'horizontal'", ")", "axScatter", ".", "set_title", "(", "r\"Spectral Residuals Sorted by ${0:s}$\"", ".", "format", "(", "label_name", ")", ")", "axScatter", ".", "set_xlabel", "(", "\"Pixels\"", ")", "axScatter", ".", "set_ylabel", "(", "r\"$%s$\"", "%", "label_name", ")", "axHisty", ".", "hist", "(", "np", ".", "std", "(", "res_norm", ",", "axis", "=", "1", ")", "[", "~", "np", ".", "isnan", "(", "np", ".", "std", "(", "res_norm", ",", "axis", "=", "1", ")", ")", "]", ",", "orientation", "=", "'horizontal'", ",", "range", "=", "[", "0", ",", "2", "]", ")", "axHisty", ".", "axhline", "(", "y", "=", "1", ",", "c", "=", "'k'", ",", "linewidth", "=", "3", ",", "label", "=", "\"y=1\"", ")", "axHisty", ".", "legend", "(", "bbox_to_anchor", "=", "(", "0.", ",", "0.8", ",", "1.", ",", ".102", ")", ",", 
"prop", "=", "{", "'family'", ":", "'serif'", ",", "'size'", ":", "'small'", "}", ")", "axHisty", ".", "text", "(", "1.0", ",", "0.5", ",", "\"Distribution of Stdev of Star Residuals\"", ",", "verticalalignment", "=", "'center'", ",", "transform", "=", "axHisty", ".", "transAxes", ",", "rotation", "=", "270", ")", "axHisty", ".", "set_ylabel", "(", "\"Standard Deviation\"", ")", "start", ",", "end", "=", "axHisty", ".", "get_xlim", "(", ")", "axHisty", ".", "xaxis", ".", "set_ticks", "(", "np", ".", "linspace", "(", "start", ",", "end", ",", "3", ")", ")", "axHisty", ".", "set_xlabel", "(", "\"Number of Stars\"", ")", "axHisty", ".", "xaxis", ".", "set_label_position", "(", "\"top\"", ")", "axHistx", ".", "hist", "(", "np", ".", "std", "(", "res_norm", ",", "axis", "=", "0", ")", "[", "~", "np", ".", "isnan", "(", "np", ".", "std", "(", "res_norm", ",", "axis", "=", "0", ")", ")", "]", ",", "range", "=", "[", "0.8", ",", "1.1", "]", ")", "axHistx", ".", "axvline", "(", "x", "=", "1", ",", "c", "=", "'k'", ",", "linewidth", "=", "3", ",", "label", "=", "\"x=1\"", ")", "axHistx", ".", "set_title", "(", "\"Distribution of Stdev of Pixel Residuals\"", ")", "axHistx", ".", "set_xlabel", "(", "\"Standard Deviation\"", ")", "axHistx", ".", "set_ylabel", "(", "\"Number of Pixels\"", ")", "start", ",", "end", "=", "axHistx", ".", "get_ylim", "(", ")", "axHistx", ".", "yaxis", ".", "set_ticks", "(", "np", ".", "linspace", "(", "start", ",", "end", ",", "3", ")", ")", "axHistx", ".", "legend", "(", ")", "filename", "=", "\"residuals_sorted_by_label_%s.png\"", "%", "i", "plt", ".", "savefig", "(", "filename", ")", "print", "(", "\"File saved as %s\"", "%", "filename", ")", "plt", ".", "close", "(", ")", "# Auto-correlation of mean residuals", "print", "(", "\"Plotting Auto-Correlation of Mean Residuals\"", ")", "mean_res", "=", "res_norm", ".", "mean", "(", "axis", "=", "0", ")", "autocorr", "=", "np", ".", "correlate", "(", "mean_res", ",", "mean_res", ",", "mode", "=", "\"full\"", ")", "pkwidth", "=", "int", "(", "len", "(", "autocorr", ")", "/", "2", "-", "np", ".", "argmin", "(", "autocorr", ")", ")", "xmin", "=", "int", "(", "len", "(", "autocorr", ")", "/", "2", ")", "-", "pkwidth", "xmax", "=", "int", "(", "len", "(", "autocorr", ")", "/", "2", ")", "+", "pkwidth", "zoom_x", "=", "np", ".", "linspace", "(", "xmin", ",", "xmax", ",", "len", "(", "autocorr", "[", "xmin", ":", "xmax", "]", ")", ")", "fig", ",", "axarr", "=", "plt", ".", "subplots", "(", "2", ")", "axarr", "[", "0", "]", ".", "plot", "(", "autocorr", ")", "axarr", "[", "0", "]", ".", "set_title", "(", "\"Autocorrelation of Mean Spectral Residual\"", ")", "axarr", "[", "0", "]", ".", "set_xlabel", "(", "\"Lag (# Pixels)\"", ")", "axarr", "[", "0", "]", ".", "set_ylabel", "(", "\"Autocorrelation\"", ")", "axarr", "[", "1", "]", ".", "plot", "(", "zoom_x", ",", "autocorr", "[", "xmin", ":", "xmax", "]", ")", "axarr", "[", "1", "]", ".", "set_title", "(", "\"Central Peak, Zoomed\"", ")", "axarr", "[", "1", "]", ".", "set_xlabel", "(", "\"Lag (# Pixels)\"", ")", "axarr", "[", "1", "]", ".", "set_ylabel", "(", "\"Autocorrelation\"", ")", "filename", "=", "\"residuals_autocorr.png\"", "plt", ".", "savefig", "(", "filename", ")", "print", "(", "\"saved %s\"", "%", "filename", ")", "plt", ".", "close", "(", ")" ]
Stack spectrum fit residuals, sort by each label. Include histogram of the RMS at each pixel. Parameters ---------- cannon_set: Dataset best-fit Cannon spectra dataset: Dataset original spectra
[ "Stack", "spectrum", "fit", "residuals", "sort", "by", "each", "label", ".", "Include", "histogram", "of", "the", "RMS", "at", "each", "pixel", "." ]
train
https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/spectral_model.py#L114-L211
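The autocorrelation block above relies on np.correlate in "full" mode, which for a length-N signal returns 2N-1 lags with zero lag at index N-1; that is where the code's int(len(autocorr)/2) center comes from. A minimal standalone sketch (the residual vector is synthetic):

import numpy as np

rng = np.random.default_rng(0)
mean_res = rng.normal(0, 1, 100)  # stand-in for the mean normalized residual

autocorr = np.correlate(mean_res, mean_res, mode="full")
print(len(autocorr))                    # 199 == 2*100 - 1
center = len(autocorr) // 2             # zero-lag index
print(np.argmax(autocorr) == center)    # True: the peak sits at zero lag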
annayqho/TheCannon
TheCannon/find_continuum_pixels.py
_find_contpix_given_cuts
def _find_contpix_given_cuts(f_cut, sig_cut, wl, fluxes, ivars):
    """ Find and return continuum pixels given the flux and sigma cut

    Parameters
    ----------
    f_cut: float
        the upper limit imposed on the quantity (fbar-1)
    sig_cut: float
        the upper limit imposed on the quantity (f_sig)
    wl: numpy ndarray of length npixels
        rest-frame wavelength vector
    fluxes: numpy ndarray of shape (nstars, npixels)
        pixel intensities
    ivars: numpy ndarray of shape nstars, npixels
        inverse variances, parallel to fluxes

    Returns
    -------
    contmask: boolean mask of length npixels
        True indicates that the pixel is continuum
    """
    f_bar = np.median(fluxes, axis=0)
    sigma_f = np.var(fluxes, axis=0)
    bad = np.logical_and(f_bar == 0, sigma_f == 0)
    cont1 = np.abs(f_bar - 1) <= f_cut
    cont2 = sigma_f <= sig_cut
    contmask = np.logical_and(cont1, cont2)
    contmask[bad] = False
    return contmask
python
def _find_contpix_given_cuts(f_cut, sig_cut, wl, fluxes, ivars):
    """ Find and return continuum pixels given the flux and sigma cut

    Parameters
    ----------
    f_cut: float
        the upper limit imposed on the quantity (fbar-1)
    sig_cut: float
        the upper limit imposed on the quantity (f_sig)
    wl: numpy ndarray of length npixels
        rest-frame wavelength vector
    fluxes: numpy ndarray of shape (nstars, npixels)
        pixel intensities
    ivars: numpy ndarray of shape nstars, npixels
        inverse variances, parallel to fluxes

    Returns
    -------
    contmask: boolean mask of length npixels
        True indicates that the pixel is continuum
    """
    f_bar = np.median(fluxes, axis=0)
    sigma_f = np.var(fluxes, axis=0)
    bad = np.logical_and(f_bar == 0, sigma_f == 0)
    cont1 = np.abs(f_bar - 1) <= f_cut
    cont2 = sigma_f <= sig_cut
    contmask = np.logical_and(cont1, cont2)
    contmask[bad] = False
    return contmask
[ "def", "_find_contpix_given_cuts", "(", "f_cut", ",", "sig_cut", ",", "wl", ",", "fluxes", ",", "ivars", ")", ":", "f_bar", "=", "np", ".", "median", "(", "fluxes", ",", "axis", "=", "0", ")", "sigma_f", "=", "np", ".", "var", "(", "fluxes", ",", "axis", "=", "0", ")", "bad", "=", "np", ".", "logical_and", "(", "f_bar", "==", "0", ",", "sigma_f", "==", "0", ")", "cont1", "=", "np", ".", "abs", "(", "f_bar", "-", "1", ")", "<=", "f_cut", "cont2", "=", "sigma_f", "<=", "sig_cut", "contmask", "=", "np", ".", "logical_and", "(", "cont1", ",", "cont2", ")", "contmask", "[", "bad", "]", "=", "False", "return", "contmask" ]
Find and return continuum pixels given the flux and sigma cut Parameters ---------- f_cut: float the upper limit imposed on the quantity (fbar-1) sig_cut: float the upper limit imposed on the quantity (f_sig) wl: numpy ndarray of length npixels rest-frame wavelength vector fluxes: numpy ndarray of shape (nstars, npixels) pixel intensities ivars: numpy ndarray of shape nstars, npixels inverse variances, parallel to fluxes Returns ------- contmask: boolean mask of length npixels True indicates that the pixel is continuum
[ "Find", "and", "return", "continuum", "pixels", "given", "the", "flux", "and", "sigma", "cut" ]
train
https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/TheCannon/find_continuum_pixels.py#L6-L34
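The cuts are easy to exercise on synthetic spectra: continuum pixels should have a median flux near 1 across stars and a small variance, while any feature fails the first cut. A self-contained sketch; the threshold values here are illustrative, not the ones used in the pipeline.

import numpy as np

rng = np.random.default_rng(1)
nstars, npix = 50, 200
fluxes = 1.0 + rng.normal(0, 0.005, (nstars, npix))
fluxes[:, 80:90] *= 0.7          # a fake absorption feature: not continuum

f_bar = np.median(fluxes, axis=0)
sigma_f = np.var(fluxes, axis=0)
contmask = (np.abs(f_bar - 1) <= 0.01) & (sigma_f <= 0.01)
print(contmask[80:90].any())     # False: the feature pixels are rejected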
annayqho/TheCannon
TheCannon/find_continuum_pixels.py
_find_contpix
def _find_contpix(wl, fluxes, ivars, target_frac):
    """ Find continuum pix in spec, meeting a set target fraction

    Parameters
    ----------
    wl: numpy ndarray
        rest-frame wavelength vector
    fluxes: numpy ndarray
        pixel intensities
    ivars: numpy ndarray
        inverse variances, parallel to fluxes
    target_frac: float
        the fraction of pixels in spectrum desired to be continuum

    Returns
    -------
    contmask: boolean numpy ndarray
        True corresponds to continuum pixels
    """
    print("Target frac: %s" % (target_frac))
    bad1 = np.median(ivars, axis=0) == SMALL
    bad2 = np.var(ivars, axis=0) == 0
    bad = np.logical_and(bad1, bad2)
    npixels = len(wl) - sum(bad)
    f_cut = 0.0001
    stepsize = 0.0001
    sig_cut = 0.0001
    contmask = _find_contpix_given_cuts(f_cut, sig_cut, wl, fluxes, ivars)
    if npixels > 0:
        frac = sum(contmask) / float(npixels)
    else:
        frac = 0
    # loosen both cuts in lockstep until the target fraction is reached
    # (note: if npixels == 0 this loop would never terminate)
    while (frac < target_frac):
        f_cut += stepsize
        sig_cut += stepsize
        contmask = _find_contpix_given_cuts(f_cut, sig_cut, wl, fluxes, ivars)
        if npixels > 0:
            frac = sum(contmask) / float(npixels)
        else:
            frac = 0
    # frac is a fraction of pixels, so compare it against 0.10 directly
    if frac > 0.10:
        print("Warning: Over 10% of pixels identified as continuum.")
    print("%s out of %s pixels identified as continuum"
          % (sum(contmask), npixels))
    print("Cuts: f_cut %s, sig_cut %s" % (f_cut, sig_cut))
    return contmask
python
def _find_contpix(wl, fluxes, ivars, target_frac):
    """ Find continuum pix in spec, meeting a set target fraction

    Parameters
    ----------
    wl: numpy ndarray
        rest-frame wavelength vector
    fluxes: numpy ndarray
        pixel intensities
    ivars: numpy ndarray
        inverse variances, parallel to fluxes
    target_frac: float
        the fraction of pixels in spectrum desired to be continuum

    Returns
    -------
    contmask: boolean numpy ndarray
        True corresponds to continuum pixels
    """
    print("Target frac: %s" % (target_frac))
    bad1 = np.median(ivars, axis=0) == SMALL
    bad2 = np.var(ivars, axis=0) == 0
    bad = np.logical_and(bad1, bad2)
    npixels = len(wl) - sum(bad)
    f_cut = 0.0001
    stepsize = 0.0001
    sig_cut = 0.0001
    contmask = _find_contpix_given_cuts(f_cut, sig_cut, wl, fluxes, ivars)
    if npixels > 0:
        frac = sum(contmask) / float(npixels)
    else:
        frac = 0
    # loosen both cuts in lockstep until the target fraction is reached
    # (note: if npixels == 0 this loop would never terminate)
    while (frac < target_frac):
        f_cut += stepsize
        sig_cut += stepsize
        contmask = _find_contpix_given_cuts(f_cut, sig_cut, wl, fluxes, ivars)
        if npixels > 0:
            frac = sum(contmask) / float(npixels)
        else:
            frac = 0
    # frac is a fraction of pixels, so compare it against 0.10 directly
    if frac > 0.10:
        print("Warning: Over 10% of pixels identified as continuum.")
    print("%s out of %s pixels identified as continuum"
          % (sum(contmask), npixels))
    print("Cuts: f_cut %s, sig_cut %s" % (f_cut, sig_cut))
    return contmask
[ "def", "_find_contpix", "(", "wl", ",", "fluxes", ",", "ivars", ",", "target_frac", ")", ":", "print", "(", "\"Target frac: %s\"", "%", "(", "target_frac", ")", ")", "bad1", "=", "np", ".", "median", "(", "ivars", ",", "axis", "=", "0", ")", "==", "SMALL", "bad2", "=", "np", ".", "var", "(", "ivars", ",", "axis", "=", "0", ")", "==", "0", "bad", "=", "np", ".", "logical_and", "(", "bad1", ",", "bad2", ")", "npixels", "=", "len", "(", "wl", ")", "-", "sum", "(", "bad", ")", "f_cut", "=", "0.0001", "stepsize", "=", "0.0001", "sig_cut", "=", "0.0001", "contmask", "=", "_find_contpix_given_cuts", "(", "f_cut", ",", "sig_cut", ",", "wl", ",", "fluxes", ",", "ivars", ")", "if", "npixels", ">", "0", ":", "frac", "=", "sum", "(", "contmask", ")", "/", "float", "(", "npixels", ")", "else", ":", "frac", "=", "0", "while", "(", "frac", "<", "target_frac", ")", ":", "f_cut", "+=", "stepsize", "sig_cut", "+=", "stepsize", "contmask", "=", "_find_contpix_given_cuts", "(", "f_cut", ",", "sig_cut", ",", "wl", ",", "fluxes", ",", "ivars", ")", "if", "npixels", ">", "0", ":", "frac", "=", "sum", "(", "contmask", ")", "/", "float", "(", "npixels", ")", "else", ":", "frac", "=", "0", "if", "frac", ">", "0.10", "*", "npixels", ":", "print", "(", "\"Warning: Over 10% of pixels identified as continuum.\"", ")", "print", "(", "\"%s out of %s pixels identified as continuum\"", "%", "(", "sum", "(", "contmask", ")", ",", "npixels", ")", ")", "print", "(", "\"Cuts: f_cut %s, sig_cut %s\"", "%", "(", "f_cut", ",", "sig_cut", ")", ")", "return", "contmask" ]
Find continuum pix in spec, meeting a set target fraction Parameters ---------- wl: numpy ndarray rest-frame wavelength vector fluxes: numpy ndarray pixel intensities ivars: numpy ndarray inverse variances, parallel to fluxes target_frac: float the fraction of pixels in spectrum desired to be continuum Returns ------- contmask: boolean numpy ndarray True corresponds to continuum pixels
[ "Find", "continuum", "pix", "in", "spec", "meeting", "a", "set", "target", "fraction" ]
train
https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/TheCannon/find_continuum_pixels.py#L37-L85
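The search loop above is just a threshold that loosens until enough pixels pass. The same idea in isolation, with a single scalar cut on synthetic median fluxes (all values made up):

import numpy as np

rng = np.random.default_rng(3)
f_bar = 1 + rng.normal(0, 0.01, 1000)    # toy per-pixel median fluxes

target_frac, f_cut, stepsize = 0.07, 0.0001, 0.0001
frac = np.mean(np.abs(f_bar - 1) <= f_cut)
while frac < target_frac:
    f_cut += stepsize                    # loosen the cut until the target is met
    frac = np.mean(np.abs(f_bar - 1) <= f_cut)
print(f_cut, round(frac, 3))             # first cut that passes >= 7% of pixels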
annayqho/TheCannon
TheCannon/find_continuum_pixels.py
_find_contpix_regions
def _find_contpix_regions(wl, fluxes, ivars, frac, ranges):
    """ Find continuum pix in a spectrum split into chunks

    Parameters
    ----------
    wl: numpy ndarray
        rest-frame wavelength vector
    fluxes: numpy ndarray
        pixel intensities
    ivars: numpy ndarray
        inverse variances, parallel to fluxes
    frac: float
        fraction of pixels in spectrum to be found as continuum
    ranges: list, array
        starts and ends indicating location of chunks in array

    Returns
    ------
    contmask: numpy ndarray, boolean
        True indicates continuum pixel
    """
    contmask = np.zeros(len(wl), dtype=bool)
    for chunk in ranges:
        start = chunk[0]
        stop = chunk[1]
        contmask[start:stop] = _find_contpix(
            wl[start:stop], fluxes[:,start:stop], ivars[:,start:stop], frac)
    return contmask
python
def _find_contpix_regions(wl, fluxes, ivars, frac, ranges):
    """ Find continuum pix in a spectrum split into chunks

    Parameters
    ----------
    wl: numpy ndarray
        rest-frame wavelength vector
    fluxes: numpy ndarray
        pixel intensities
    ivars: numpy ndarray
        inverse variances, parallel to fluxes
    frac: float
        fraction of pixels in spectrum to be found as continuum
    ranges: list, array
        starts and ends indicating location of chunks in array

    Returns
    ------
    contmask: numpy ndarray, boolean
        True indicates continuum pixel
    """
    contmask = np.zeros(len(wl), dtype=bool)
    for chunk in ranges:
        start = chunk[0]
        stop = chunk[1]
        contmask[start:stop] = _find_contpix(
            wl[start:stop], fluxes[:,start:stop], ivars[:,start:stop], frac)
    return contmask
[ "def", "_find_contpix_regions", "(", "wl", ",", "fluxes", ",", "ivars", ",", "frac", ",", "ranges", ")", ":", "contmask", "=", "np", ".", "zeros", "(", "len", "(", "wl", ")", ",", "dtype", "=", "bool", ")", "for", "chunk", "in", "ranges", ":", "start", "=", "chunk", "[", "0", "]", "stop", "=", "chunk", "[", "1", "]", "contmask", "[", "start", ":", "stop", "]", "=", "_find_contpix", "(", "wl", "[", "start", ":", "stop", "]", ",", "fluxes", "[", ":", ",", "start", ":", "stop", "]", ",", "ivars", "[", ":", ",", "start", ":", "stop", "]", ",", "frac", ")", "return", "contmask" ]
Find continuum pix in a spectrum split into chunks Parameters ---------- wl: numpy ndarray rest-frame wavelength vector fluxes: numpy ndarray pixel intensities ivars: numpy ndarray inverse variances, parallel to fluxes frac: float fraction of pixels in spectrum to be found as continuum ranges: list, array starts and ends indicating location of chunks in array Returns ------ contmask: numpy ndarray, boolean True indicates continuum pixel
[ "Find", "continuum", "pix", "in", "a", "spectrum", "split", "into", "chunks" ]
train
https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/TheCannon/find_continuum_pixels.py#L88-L119
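Chunk-wise continuum finding just writes each chunk's mask into the matching slice of a full-length mask; any gap between chunks stays False. A sketch with a hypothetical stand-in for _find_contpix:

import numpy as np

def fake_contpix(wl_chunk):              # hypothetical stand-in for _find_contpix
    return np.ones(len(wl_chunk), dtype=bool)

wl = np.arange(10)
ranges = [(0, 4), (6, 10)]               # start/stop indices for each chunk
contmask = np.zeros(len(wl), dtype=bool)
for start, stop in ranges:
    contmask[start:stop] = fake_contpix(wl[start:stop])
print(contmask)                          # indices 4-5 remain False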
annayqho/TheCannon
code/lamost/xcalib_5labels/cross_validation.py
group_data
def group_data():
    """ Load the reference data, and assign each object
    a random integer from 0 to 7. Save the IDs. """
    tr_obj = np.load("%s/ref_id.npz" % direc_ref)['arr_0']
    groups = np.random.randint(0, 8, size=len(tr_obj))
    np.savez("ref_groups.npz", groups)
python
def group_data():
    """ Load the reference data, and assign each object
    a random integer from 0 to 7. Save the IDs. """
    tr_obj = np.load("%s/ref_id.npz" % direc_ref)['arr_0']
    groups = np.random.randint(0, 8, size=len(tr_obj))
    np.savez("ref_groups.npz", groups)
[ "def", "group_data", "(", ")", ":", "tr_obj", "=", "np", ".", "load", "(", "\"%s/ref_id.npz\"", "%", "direc_ref", ")", "[", "'arr_0'", "]", "groups", "=", "np", ".", "random", ".", "randint", "(", "0", ",", "8", ",", "size", "=", "len", "(", "tr_obj", ")", ")", "np", ".", "savez", "(", "\"ref_groups.npz\"", ",", "groups", ")" ]
Load the reference data, and assign each object a random integer from 0 to 7. Save the IDs.
[ "Load", "the", "reference", "data", "and", "assign", "each", "object", "a", "random", "integer", "from", "0", "to", "7", ".", "Save", "the", "IDs", "." ]
train
https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/code/lamost/xcalib_5labels/cross_validation.py#L26-L32
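Because np.random.randint assigns each object independently, the eight folds come out only approximately equal in size. A quick check of that behavior (the object count is made up; the 0-7 range follows the function above):

import numpy as np

n_obj = 10000
groups = np.random.randint(0, 8, size=n_obj)  # high end exclusive: values 0..7
counts = np.bincount(groups, minlength=8)
print(counts)                   # roughly n_obj/8 per fold, but not exactly
print(counts.sum() == n_obj)    # True: every object is in exactly one fold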
annayqho/TheCannon
code/lamost/xcalib_5labels/cross_validation.py
train
def train(ds, ii):
    """ Run the training step, given a dataset object. """
    print("Loading model")
    m = model.CannonModel(2)
    print("Training...")
    m.fit(ds)
    np.savez("./ex%s_coeffs.npz" % ii, m.coeffs)
    np.savez("./ex%s_scatters.npz" % ii, m.scatters)
    np.savez("./ex%s_chisqs.npz" % ii, m.chisqs)
    np.savez("./ex%s_pivots.npz" % ii, m.pivots)
    fig = m.diagnostics_leading_coeffs(ds)
    plt.savefig("ex%s_leading_coeffs.png" % ii)
    # m.diagnostics_leading_coeffs_triangle(ds)
    # m.diagnostics_plot_chisq(ds)
    return m
python
def train(ds, ii):
    """ Run the training step, given a dataset object. """
    print("Loading model")
    m = model.CannonModel(2)
    print("Training...")
    m.fit(ds)
    np.savez("./ex%s_coeffs.npz" % ii, m.coeffs)
    np.savez("./ex%s_scatters.npz" % ii, m.scatters)
    np.savez("./ex%s_chisqs.npz" % ii, m.chisqs)
    np.savez("./ex%s_pivots.npz" % ii, m.pivots)
    fig = m.diagnostics_leading_coeffs(ds)
    plt.savefig("ex%s_leading_coeffs.png" % ii)
    # m.diagnostics_leading_coeffs_triangle(ds)
    # m.diagnostics_plot_chisq(ds)
    return m
[ "def", "train", "(", "ds", ",", "ii", ")", ":", "print", "(", "\"Loading model\"", ")", "m", "=", "model", ".", "CannonModel", "(", "2", ")", "print", "(", "\"Training...\"", ")", "m", ".", "fit", "(", "ds", ")", "np", ".", "savez", "(", "\"./ex%s_coeffs.npz\"", "%", "ii", ",", "m", ".", "coeffs", ")", "np", ".", "savez", "(", "\"./ex%s_scatters.npz\"", "%", "ii", ",", "m", ".", "scatters", ")", "np", ".", "savez", "(", "\"./ex%s_chisqs.npz\"", "%", "ii", ",", "m", ".", "chisqs", ")", "np", ".", "savez", "(", "\"./ex%s_pivots.npz\"", "%", "ii", ",", "m", ".", "pivots", ")", "fig", "=", "m", ".", "diagnostics_leading_coeffs", "(", "ds", ")", "plt", ".", "savefig", "(", "\"ex%s_leading_coeffs.png\"", "%", "ii", ")", "# m.diagnostics_leading_coeffs_triangle(ds)", "# m.diagnostics_plot_chisq(ds)", "return", "m" ]
Run the training step, given a dataset object.
[ "Run", "the", "training", "step", "given", "a", "dataset", "object", "." ]
train
https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/code/lamost/xcalib_5labels/cross_validation.py#L35-L49
annayqho/TheCannon
code/lamost/xcalib_5labels/cross_validation.py
xvalidate
def xvalidate():
    """ Train a model, leaving out a group corresponding
    to a random integer from 0 to 7, e.g. leave out 0.
    Test on the remaining 1/8 of the sample. """
    print("Loading data")
    groups = np.load("ref_groups.npz")['arr_0']
    ref_label = np.load("%s/ref_label.npz" % direc_ref)['arr_0']
    ref_id = np.load("%s/ref_id.npz" % direc_ref)['arr_0']
    ref_flux = np.load("%s/ref_flux.npz" % direc_ref)['arr_0']
    ref_ivar = np.load("%s/ref_ivar.npz" % direc_ref)['arr_0']
    wl = np.load("%s/wl.npz" % direc_ref)['arr_0']
    num_models = 8
    for ii in np.arange(num_models):
        print("Leaving out group %s" % ii)
        train_on = groups != ii
        test_on = groups == ii
        tr_label = ref_label[train_on]
        tr_id = ref_id[train_on]
        tr_flux = ref_flux[train_on]
        tr_ivar = ref_ivar[train_on]
        print("Training on %s objects" % len(tr_id))
        test_label = ref_label[test_on]
        test_id = ref_id[test_on]
        test_flux = ref_flux[test_on]
        test_ivar = ref_ivar[test_on]
        print("Testing on %s objects" % len(test_id))
        print("Loading dataset...")
        ds = dataset.Dataset(
                wl, tr_id, tr_flux, tr_ivar, tr_label,
                test_id, test_flux, test_ivar)
        ds.set_label_names(
                ['T_{eff}', '\log g', '[M/H]', '[\\alpha/Fe]', 'AKWISE'])
        fig = ds.diagnostics_SNR()
        plt.savefig("ex%s_SNR.png" % ii)
        fig = ds.diagnostics_ref_labels()
        plt.savefig("ex%s_ref_label_triangle.png" % ii)
        np.savez("ex%s_tr_snr.npz" % ii, ds.tr_SNR)
        # train a model
        m = train(ds, ii)
        # test step
        ds.tr_label = test_label  # to compare the results
        test(ds, m, ii)
python
def xvalidate():
    """ Train a model, leaving out a group corresponding
    to a random integer from 0 to 7, e.g. leave out 0.
    Test on the remaining 1/8 of the sample. """
    print("Loading data")
    groups = np.load("ref_groups.npz")['arr_0']
    ref_label = np.load("%s/ref_label.npz" % direc_ref)['arr_0']
    ref_id = np.load("%s/ref_id.npz" % direc_ref)['arr_0']
    ref_flux = np.load("%s/ref_flux.npz" % direc_ref)['arr_0']
    ref_ivar = np.load("%s/ref_ivar.npz" % direc_ref)['arr_0']
    wl = np.load("%s/wl.npz" % direc_ref)['arr_0']
    num_models = 8
    for ii in np.arange(num_models):
        print("Leaving out group %s" % ii)
        train_on = groups != ii
        test_on = groups == ii
        tr_label = ref_label[train_on]
        tr_id = ref_id[train_on]
        tr_flux = ref_flux[train_on]
        tr_ivar = ref_ivar[train_on]
        print("Training on %s objects" % len(tr_id))
        test_label = ref_label[test_on]
        test_id = ref_id[test_on]
        test_flux = ref_flux[test_on]
        test_ivar = ref_ivar[test_on]
        print("Testing on %s objects" % len(test_id))
        print("Loading dataset...")
        ds = dataset.Dataset(
                wl, tr_id, tr_flux, tr_ivar, tr_label,
                test_id, test_flux, test_ivar)
        ds.set_label_names(
                ['T_{eff}', '\log g', '[M/H]', '[\\alpha/Fe]', 'AKWISE'])
        fig = ds.diagnostics_SNR()
        plt.savefig("ex%s_SNR.png" % ii)
        fig = ds.diagnostics_ref_labels()
        plt.savefig("ex%s_ref_label_triangle.png" % ii)
        np.savez("ex%s_tr_snr.npz" % ii, ds.tr_SNR)
        # train a model
        m = train(ds, ii)
        # test step
        ds.tr_label = test_label  # to compare the results
        test(ds, m, ii)
[ "def", "xvalidate", "(", ")", ":", "print", "(", "\"Loading data\"", ")", "groups", "=", "np", ".", "load", "(", "\"ref_groups.npz\"", ")", "[", "'arr_0'", "]", "ref_label", "=", "np", ".", "load", "(", "\"%s/ref_label.npz\"", "%", "direc_ref", ")", "[", "'arr_0'", "]", "ref_id", "=", "np", ".", "load", "(", "\"%s/ref_id.npz\"", "%", "direc_ref", ")", "[", "'arr_0'", "]", "ref_flux", "=", "np", ".", "load", "(", "\"%s/ref_flux.npz\"", "%", "direc_ref", ")", "[", "'arr_0'", "]", "ref_ivar", "=", "np", ".", "load", "(", "\"%s/ref_ivar.npz\"", "%", "direc_ref", ")", "[", "'arr_0'", "]", "wl", "=", "np", ".", "load", "(", "\"%s/wl.npz\"", "%", "direc_ref", ")", "[", "'arr_0'", "]", "num_models", "=", "8", "for", "ii", "in", "np", ".", "arange", "(", "num_models", ")", ":", "print", "(", "\"Leaving out group %s\"", "%", "ii", ")", "train_on", "=", "groups", "!=", "ii", "test_on", "=", "groups", "==", "ii", "tr_label", "=", "ref_label", "[", "train_on", "]", "tr_id", "=", "ref_id", "[", "train_on", "]", "tr_flux", "=", "ref_flux", "[", "train_on", "]", "tr_ivar", "=", "ref_ivar", "[", "train_on", "]", "print", "(", "\"Training on %s objects\"", "%", "len", "(", "tr_id", ")", ")", "test_label", "=", "ref_label", "[", "test_on", "]", "test_id", "=", "ref_id", "[", "test_on", "]", "test_flux", "=", "ref_flux", "[", "test_on", "]", "test_ivar", "=", "ref_ivar", "[", "test_on", "]", "print", "(", "\"Testing on %s objects\"", "%", "len", "(", "test_id", ")", ")", "print", "(", "\"Loading dataset...\"", ")", "ds", "=", "dataset", ".", "Dataset", "(", "wl", ",", "tr_id", ",", "tr_flux", ",", "tr_ivar", ",", "tr_label", ",", "test_id", ",", "test_flux", ",", "test_ivar", ")", "ds", ".", "set_label_names", "(", "[", "'T_{eff}'", ",", "'\\log g'", ",", "'[M/H]'", ",", "'[\\\\alpha/Fe]'", ",", "'AKWISE'", "]", ")", "fig", "=", "ds", ".", "diagnostics_SNR", "(", ")", "plt", ".", "savefig", "(", "\"ex%s_SNR.png\"", "%", "ii", ")", "fig", "=", "ds", ".", "diagnostics_ref_labels", "(", ")", "plt", ".", "savefig", "(", "\"ex%s_ref_label_triangle.png\"", "%", "ii", ")", "np", ".", "savez", "(", "\"ex%s_tr_snr.npz\"", "%", "ii", ",", "ds", ".", "tr_SNR", ")", "# train a model", "m", "=", "train", "(", "ds", ",", "ii", ")", "# test step", "ds", ".", "tr_label", "=", "test_label", "# to compare the results", "test", "(", "ds", ",", "m", ",", "ii", ")" ]
Train a model, leaving out a group corresponding to a random integer from 0 to 7, e.g. leave out 0. Test on the remaining 1/8 of the sample.
[ "Train", "a", "model", "leaving", "out", "a", "group", "corresponding", "to", "a", "random", "integer", "from", "0", "to", "7", "e", ".", "g", ".", "leave", "out", "0", ".", "Test", "on", "the", "remaining", "1", "/", "8", "of", "the", "sample", "." ]
train
https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/code/lamost/xcalib_5labels/cross_validation.py#L159-L207
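Each leave-one-group-out split above is a pair of complementary boolean masks applied to the same arrays, so no object is dropped or duplicated. A minimal sketch with toy data:

import numpy as np

groups = np.array([0, 1, 2, 0, 1, 2, 0, 1])
ref_flux = np.arange(8.0).reshape(8, 1)   # toy "spectra", one pixel each

ii = 0
train_on = groups != ii
test_on = groups == ii
tr_flux, test_flux = ref_flux[train_on], ref_flux[test_on]
assert len(tr_flux) + len(test_flux) == len(ref_flux)  # complementary split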
annayqho/TheCannon
code/aaomega/aaomega_munge_data.py
weighted_std
def weighted_std(values, weights):
    """ Calculate standard deviation weighted by errors """
    average = np.average(values, weights=weights)
    variance = np.average((values - average)**2, weights=weights)
    return np.sqrt(variance)
python
def weighted_std(values, weights):
    """ Calculate standard deviation weighted by errors """
    average = np.average(values, weights=weights)
    variance = np.average((values - average)**2, weights=weights)
    return np.sqrt(variance)
[ "def", "weighted_std", "(", "values", ",", "weights", ")", ":", "average", "=", "np", ".", "average", "(", "values", ",", "weights", "=", "weights", ")", "variance", "=", "np", ".", "average", "(", "(", "values", "-", "average", ")", "**", "2", ",", "weights", "=", "weights", ")", "return", "np", ".", "sqrt", "(", "variance", ")" ]
Calculate standard deviation weighted by errors
[ "Calculate", "standard", "deviation", "weighted", "by", "errors" ]
train
https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/code/aaomega/aaomega_munge_data.py#L12-L16
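With inverse-variance weights, the weighted standard deviation above barely feels noisy points. A tiny worked example (values and weights are made up):

import numpy as np

values = np.array([1.0, 1.2, 5.0])
weights = np.array([1.0, 1.0, 0.01])   # e.g. 1/sigma^2: the outlier is very noisy

average = np.average(values, weights=weights)
variance = np.average((values - average)**2, weights=weights)
print(np.sqrt(variance))   # ~0.29, versus np.std(values) ~ 1.84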
annayqho/TheCannon
code/aaomega/aaomega_munge_data.py
estimate_noise
def estimate_noise(fluxes, contmask):
    """ Estimate the scatter in a region of the spectrum
    taken to be continuum """
    nstars = fluxes.shape[0]
    scatter = np.zeros(nstars)
    for i, spec in enumerate(fluxes):
        cont = spec[contmask]
        scatter[i] = stats.funcs.mad_std(cont)
    return scatter
python
def estimate_noise(fluxes, contmask):
    """ Estimate the scatter in a region of the spectrum
    taken to be continuum """
    nstars = fluxes.shape[0]
    scatter = np.zeros(nstars)
    for i, spec in enumerate(fluxes):
        cont = spec[contmask]
        scatter[i] = stats.funcs.mad_std(cont)
    return scatter
[ "def", "estimate_noise", "(", "fluxes", ",", "contmask", ")", ":", "nstars", "=", "fluxes", ".", "shape", "[", "0", "]", "scatter", "=", "np", ".", "zeros", "(", "nstars", ")", "for", "i", ",", "spec", "in", "enumerate", "(", "fluxes", ")", ":", "cont", "=", "spec", "[", "contmask", "]", "scatter", "[", "i", "]", "=", "stats", ".", "funcs", ".", "mad_std", "(", "cont", ")", "return", "scatter" ]
Estimate the scatter in a region of the spectrum taken to be continuum
[ "Estimate", "the", "scatter", "in", "a", "region", "of", "the", "spectrum", "taken", "to", "be", "continuum" ]
train
https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/code/aaomega/aaomega_munge_data.py#L19-L27
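mad_std is a robust scatter estimator, so a single spike in the continuum barely moves it, unlike a plain standard deviation. A sketch importing it directly from astropy.stats rather than through the stats.funcs path used above:

import numpy as np
from astropy.stats import mad_std

rng = np.random.default_rng(2)
spec = 1.0 + rng.normal(0, 0.02, 500)   # flat continuum with 2% noise
spec[100] = 10.0                        # a cosmic-ray-like spike

contmask = np.ones(500, dtype=bool)     # here: treat every pixel as continuum
print(mad_std(spec[contmask]))          # ~0.02; np.std would be inflated by the spike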
annayqho/TheCannon
code/aaomega/aaomega_munge_data.py
load_ref_spectra
def load_ref_spectra():
    """ Pull out wl, flux, ivar from files of training spectra """
    data_dir = "/Users/annaho/Data/AAOmega/ref_spectra"
    # Load the files & count the number of training objects
    ff = glob.glob("%s/*.txt" % data_dir)
    nstars = len(ff)
    print("We have %s training objects" % nstars)

    # Read the first file to get the wavelength array
    f = ff[0]
    data = Table.read(f, format="ascii.fast_no_header")
    wl = data['col1']
    npix = len(wl)
    print("We have %s pixels" % npix)

    tr_flux = np.zeros((nstars, npix))
    tr_ivar = np.zeros(tr_flux.shape)

    for i, f in enumerate(ff):
        data = Table.read(f, format="ascii.fast_no_header")
        flux = data['col2']
        tr_flux[i,:] = flux
        sigma = data['col3']
        tr_ivar[i,:] = 1.0 / sigma**2

    return np.array(ff), wl, tr_flux, tr_ivar
python
def load_ref_spectra():
    """ Pull out wl, flux, ivar from files of training spectra """
    data_dir = "/Users/annaho/Data/AAOmega/ref_spectra"
    # Load the files & count the number of training objects
    ff = glob.glob("%s/*.txt" % data_dir)
    nstars = len(ff)
    print("We have %s training objects" % nstars)

    # Read the first file to get the wavelength array
    f = ff[0]
    data = Table.read(f, format="ascii.fast_no_header")
    wl = data['col1']
    npix = len(wl)
    print("We have %s pixels" % npix)

    tr_flux = np.zeros((nstars, npix))
    tr_ivar = np.zeros(tr_flux.shape)

    for i, f in enumerate(ff):
        data = Table.read(f, format="ascii.fast_no_header")
        flux = data['col2']
        tr_flux[i,:] = flux
        sigma = data['col3']
        tr_ivar[i,:] = 1.0 / sigma**2

    return np.array(ff), wl, tr_flux, tr_ivar
[ "def", "load_ref_spectra", "(", ")", ":", "data_dir", "=", "\"/Users/annaho/Data/AAOmega/ref_spectra\"", "# Load the files & count the number of training objects", "ff", "=", "glob", ".", "glob", "(", "\"%s/*.txt\"", "%", "data_dir", ")", "nstars", "=", "len", "(", "ff", ")", "print", "(", "\"We have %s training objects\"", "%", "nstars", ")", "# Read the first file to get the wavelength array", "f", "=", "ff", "[", "0", "]", "data", "=", "Table", ".", "read", "(", "f", ",", "format", "=", "\"ascii.fast_no_header\"", ")", "wl", "=", "data", "[", "'col1'", "]", "npix", "=", "len", "(", "wl", ")", "print", "(", "\"We have %s pixels\"", "%", "npix", ")", "tr_flux", "=", "np", ".", "zeros", "(", "(", "nstars", ",", "npix", ")", ")", "tr_ivar", "=", "np", ".", "zeros", "(", "tr_flux", ".", "shape", ")", "for", "i", ",", "f", "in", "enumerate", "(", "ff", ")", ":", "data", "=", "Table", ".", "read", "(", "f", ",", "format", "=", "\"ascii.fast_no_header\"", ")", "flux", "=", "data", "[", "'col2'", "]", "tr_flux", "[", "i", ",", ":", "]", "=", "flux", "sigma", "=", "data", "[", "'col3'", "]", "tr_ivar", "[", "i", ",", ":", "]", "=", "1.0", "/", "sigma", "**", "2", "return", "np", ".", "array", "(", "ff", ")", ",", "wl", ",", "tr_flux", ",", "tr_ivar" ]
Pull out wl, flux, ivar from files of training spectra
[ "Pull", "out", "wl", "flux", "ivar", "from", "files", "of", "training", "spectra" ]
train
https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/code/aaomega/aaomega_munge_data.py#L30-L55
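One caveat in the ivar construction above: 1.0 / sigma**2 divides by zero wherever sigma == 0. A hedged variant with a guard (the guard is an addition for illustration, not part of the original code):

import numpy as np

sigma = np.array([0.02, 0.0, 0.05])   # toy per-pixel errors; 0 marks a dead pixel
ivar = np.zeros_like(sigma)
good = sigma > 0
ivar[good] = 1.0 / sigma[good]**2     # dead pixels keep ivar = 0, i.e. no weight
print(ivar)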
annayqho/TheCannon
code/aaomega/aaomega_munge_data.py
load_data
def load_data():
    """ Use all the above functions to set data up for The Cannon """
    data_dir = "/Users/annaho/Data/AAOmega"
    out_dir = "%s/%s" % (data_dir, "Run_13_July")
    ff, wl, tr_flux, tr_ivar = load_ref_spectra()

    # Pick one object that doesn't have extra dead pixels
    skylines = tr_ivar[4,:]  # should be the same across all obj
    np.savez("%s/skylines.npz" % out_dir, skylines)

    contmask = np.load("%s/contmask_regions.npz" % data_dir)['arr_0']
    scatter = estimate_noise(tr_flux, contmask)
    ids, labels = load_labels()

    # Select the objects in the catalog corresponding to the files
    inds = []
    ff_short = []
    for fname in ff:
        val = fname.split("/")[-1]
        short = (val.split('.')[0] + '.' + val.split('.')[1])
        ff_short.append(short)
        if short in ids:
            ind = np.where(ids == short)[0][0]
            inds.append(ind)

    # choose the labels
    tr_id = ids[inds]
    tr_label = labels[inds]

    # find the corresponding spectra
    ff_short = np.array(ff_short)
    inds = np.array([np.where(ff_short == val)[0][0] for val in tr_id])
    tr_flux_choose = tr_flux[inds]
    tr_ivar_choose = tr_ivar[inds]
    scatter_choose = scatter[inds]
    np.savez("%s/wl.npz" % out_dir, wl)
    np.savez("%s/ref_id_all.npz" % out_dir, tr_id)
    np.savez("%s/ref_flux_all.npz" % out_dir, tr_flux_choose)
    np.savez("%s/ref_ivar_all.npz" % out_dir, tr_ivar_choose)
    np.savez("%s/ref_label_all.npz" % out_dir, tr_label)
    np.savez("%s/ref_spec_scat_all.npz" % out_dir, scatter_choose)

    # now, the test spectra
    test_id, test_flux = load_test_spectra()
    scatter = estimate_noise(test_flux, contmask)
    np.savez("%s/test_id.npz" % out_dir, test_id)
    np.savez("%s/test_flux.npz" % out_dir, test_flux)
    np.savez("%s/test_spec_scat.npz" % out_dir, scatter)
python
def load_data():
    """ Use all the above functions to set data up for The Cannon """
    data_dir = "/Users/annaho/Data/AAOmega"
    out_dir = "%s/%s" % (data_dir, "Run_13_July")
    ff, wl, tr_flux, tr_ivar = load_ref_spectra()

    # Pick one object that doesn't have extra dead pixels
    skylines = tr_ivar[4,:]  # should be the same across all obj
    np.savez("%s/skylines.npz" % out_dir, skylines)

    contmask = np.load("%s/contmask_regions.npz" % data_dir)['arr_0']
    scatter = estimate_noise(tr_flux, contmask)
    ids, labels = load_labels()

    # Select the objects in the catalog corresponding to the files
    inds = []
    ff_short = []
    for fname in ff:
        val = fname.split("/")[-1]
        short = (val.split('.')[0] + '.' + val.split('.')[1])
        ff_short.append(short)
        if short in ids:
            ind = np.where(ids == short)[0][0]
            inds.append(ind)

    # choose the labels
    tr_id = ids[inds]
    tr_label = labels[inds]

    # find the corresponding spectra
    ff_short = np.array(ff_short)
    inds = np.array([np.where(ff_short == val)[0][0] for val in tr_id])
    tr_flux_choose = tr_flux[inds]
    tr_ivar_choose = tr_ivar[inds]
    scatter_choose = scatter[inds]
    np.savez("%s/wl.npz" % out_dir, wl)
    np.savez("%s/ref_id_all.npz" % out_dir, tr_id)
    np.savez("%s/ref_flux_all.npz" % out_dir, tr_flux_choose)
    np.savez("%s/ref_ivar_all.npz" % out_dir, tr_ivar_choose)
    np.savez("%s/ref_label_all.npz" % out_dir, tr_label)
    np.savez("%s/ref_spec_scat_all.npz" % out_dir, scatter_choose)

    # now, the test spectra
    test_id, test_flux = load_test_spectra()
    scatter = estimate_noise(test_flux, contmask)
    np.savez("%s/test_id.npz" % out_dir, test_id)
    np.savez("%s/test_flux.npz" % out_dir, test_flux)
    np.savez("%s/test_spec_scat.npz" % out_dir, scatter)
[ "def", "load_data", "(", ")", ":", "data_dir", "=", "\"/Users/annaho/Data/AAOmega\"", "out_dir", "=", "\"%s/%s\"", "%", "(", "data_dir", ",", "\"Run_13_July\"", ")", "ff", ",", "wl", ",", "tr_flux", ",", "tr_ivar", "=", "load_ref_spectra", "(", ")", "\"\"\" pick one that doesn't have extra dead pixels \"\"\"", "skylines", "=", "tr_ivar", "[", "4", ",", ":", "]", "# should be the same across all obj", "np", ".", "savez", "(", "\"%s/skylines.npz\"", "%", "out_dir", ",", "skylines", ")", "contmask", "=", "np", ".", "load", "(", "\"%s/contmask_regions.npz\"", "%", "data_dir", ")", "[", "'arr_0'", "]", "scatter", "=", "estimate_noise", "(", "tr_flux", ",", "contmask", ")", "ids", ",", "labels", "=", "load_labels", "(", ")", "# Select the objects in the catalog corresponding to the files", "inds", "=", "[", "]", "ff_short", "=", "[", "]", "for", "fname", "in", "ff", ":", "val", "=", "fname", ".", "split", "(", "\"/\"", ")", "[", "-", "1", "]", "short", "=", "(", "val", ".", "split", "(", "'.'", ")", "[", "0", "]", "+", "'.'", "+", "val", ".", "split", "(", "'.'", ")", "[", "1", "]", ")", "ff_short", ".", "append", "(", "short", ")", "if", "short", "in", "ids", ":", "ind", "=", "np", ".", "where", "(", "ids", "==", "short", ")", "[", "0", "]", "[", "0", "]", "inds", ".", "append", "(", "ind", ")", "# choose the labels", "tr_id", "=", "ids", "[", "inds", "]", "tr_label", "=", "labels", "[", "inds", "]", "# find the corresponding spectra", "ff_short", "=", "np", ".", "array", "(", "ff_short", ")", "inds", "=", "np", ".", "array", "(", "[", "np", ".", "where", "(", "ff_short", "==", "val", ")", "[", "0", "]", "[", "0", "]", "for", "val", "in", "tr_id", "]", ")", "tr_flux_choose", "=", "tr_flux", "[", "inds", "]", "tr_ivar_choose", "=", "tr_ivar", "[", "inds", "]", "scatter_choose", "=", "scatter", "[", "inds", "]", "np", ".", "savez", "(", "\"%s/wl.npz\"", "%", "out_dir", ",", "wl", ")", "np", ".", "savez", "(", "\"%s/ref_id_all.npz\"", "%", "out_dir", ",", "tr_id", ")", "np", ".", "savez", "(", "\"%s/ref_flux_all.npz\"", "%", "out_dir", ",", "tr_flux_choose", ")", "np", ".", "savez", "(", "\"%s/ref_ivar_all.npz\"", "%", "out_dir", ",", "tr_ivar_choose", ")", "np", ".", "savez", "(", "\"%s/ref_label_all.npz\"", "%", "out_dir", ",", "tr_label", ")", "np", ".", "savez", "(", "\"%s/ref_spec_scat_all.npz\"", "%", "out_dir", ",", "scatter_choose", ")", "# now, the test spectra", "test_id", ",", "test_flux", "=", "load_test_spectra", "(", ")", "scatter", "=", "estimate_noise", "(", "test_flux", ",", "contmask", ")", "np", ".", "savez", "(", "\"%s/test_id.npz\"", "%", "out_dir", ",", "test_id", ")", "np", ".", "savez", "(", "\"%s/test_flux.npz\"", "%", "out_dir", ",", "test_flux", ")", "np", ".", "savez", "(", "\"%s/test_spec_scat.npz\"", "%", "out_dir", ",", "scatter", ")" ]
Use all the above functions to set data up for The Cannon
[ "Use", "all", "the", "above", "functions", "to", "set", "data", "up", "for", "The", "Cannon" ]
train
https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/code/aaomega/aaomega_munge_data.py#L109-L157
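All of these routines persist arrays with np.savez, which stores unnamed arrays under positional keys; that is why every load in this file indexes the archive with ['arr_0']. A minimal round-trip sketch:

import numpy as np

wl = np.linspace(8400.0, 8800.0, 5)
np.savez("wl.npz", wl)                # saved positionally, not by keyword

loaded = np.load("wl.npz")['arr_0']   # hence the ['arr_0'] pattern above
print(np.array_equal(wl, loaded))     # True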