Dataset columns: content (string, 39 to 14.9k chars) · sha1 (string, 40 chars) · id (int64, 0 to 710k)
def rivers_with_station(stations):
    """Return a sorted list, without repeats, of the rivers the stations are on."""
    station_rivers = set()
    for station in stations:
        station_rivers.add(station.river)
    return sorted(station_rivers)
d13044fef2a824d0d08e4419a13a2b22f1732175
12,322
def _format_media_type(endpoint, version, suffix):
    """
    Formats a value for a cosmos Content-Type or Accept header key.

    :param endpoint: a cosmos endpoint, of the form 'x/y', for example
        'package/repo/add', 'service/start', or 'package/error'
    :type endpoint: str
    :param version: The version of the request
    :type version: str
    :param suffix: The string that will be appended to endpoint type, most
        commonly 'request' or 'response'
    :type suffix: str
    :return: a formatted value for a Content-Type or Accept header key
    :rtype: str
    """
    prefix = endpoint.replace('/', '.')
    separator = '-' if suffix else ''
    return ('application/vnd.dcos.{}{}{}'
            '+json;charset=utf-8;version={}').format(prefix, separator,
                                                     suffix, version)
993e178fc2a91490544936e019342e6ab8f928ce
12,325
def fill_result_from_objective_history(result, history):
    """
    Overwrite function values in the result object with the values recorded
    in the history.
    """
    # counters
    result.n_fval = history.n_fval
    result.n_grad = history.n_grad
    result.n_hess = history.n_hess
    result.n_res = history.n_res
    result.n_sres = history.n_sres

    # initial values
    result.x0 = history.x0
    result.fval0 = history.fval0

    # best found values
    result.x = history.x_min
    result.fval = history.fval_min

    # trace
    result.trace = history.trace

    return result
a2f1388c5d71a06f45369098039a3fa623a25735
12,326
def Armijo_Rule(f_next, f_initial, c1, step_size, pg_initial):
    """
    :param f_next: new value of the function to be optimized w.r.t. step size
    :param f_initial: value of the function before the line search for the
        optimal step size
    :param c1: constant with 0 < c1 < c2 < 1
    :param step_size: step size to be tested
    :param pg_initial: inner product of the step direction, p, with the
        gradient before stepping, g_initial
    :return: True if the Armijo condition is satisfied
    """
    return f_next <= f_initial + c1 * step_size * pg_initial
90462eda94244c10afdf34e1ad042118d793c4fd
12,328
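A minimal usage sketch for Armijo_Rule in a backtracking line search; the quadratic objective and all numeric values below are illustrative assumptions, not part of the original record:

# Backtracking line search on f(x) = x**2 at x0 = 1.0 with direction p = -f'(x0).
f = lambda x: x ** 2
x0, g0 = 1.0, 2.0          # gradient f'(1) = 2
p = -g0                    # descent direction
pg = p * g0                # <p, g> = -4
step = 1.0
while not Armijo_Rule(f(x0 + step * p), f(x0), c1=1e-4, step_size=step, pg_initial=pg):
    step *= 0.5            # backtrack until sufficient decrease holds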
import socket


def connect(host, port):
    """Connect to remote host."""
    # Create socket
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    except socket.error as msg:
        return (None, msg)
    # Get remote IP
    try:
        addr = socket.gethostbyname(host)
    except socket.gaierror as msg:
        s.close()
        return (None, msg)
    # Connect
    try:
        s.connect((addr, port))
    except socket.error as msg:
        s.close()
        return (None, msg)
    return (s, None)
ab03e5551f0fbf92c6e2417635bc753d5b77eae2
12,332
import pprint


def pfomart(obj):
    """Pretty-format an object for output.

    :param obj: the object to format
    :return: the pretty-formatted string
    """
    return pprint.pformat(obj)
2f2c7d7df9de9bb65c72634e8ba9386365408445
12,335
import re


def rle2cells(rle_str: str) -> str:
    """Convert lifeform string in RLE encoding to PlainText.

    Args:
        rle_str (str): single line of RLE commands

    Returns:
        str: valid PlainText-encoded lifeform
    """
    # drop the last part
    if "!" in rle_str:
        rle_str = rle_str[: rle_str.index("!")]
    else:
        raise ValueError('Incorrect input: no "!"')

    if not set(rle_str).issubset("0123456789bo$"):
        raise ValueError("Incorrect input: wrong character set")

    commands = re.findall("([0-9]*)(b|o|\\$)", rle_str)
    if len(commands) == 0:
        raise ValueError("Incorrect input: wrong pattern format")

    layout_string = ""
    parse_dict = {"b": ".", "o": "O", "$": "\n"}
    for com in commands:
        n = int(com[0]) if com[0] else 1
        layout_string += parse_dict[com[1]] * n
    return layout_string
e48ea40bd9032445e1aabe4753b7fbdcc62191ed
12,338
def pow_sq(x, y):
    """
    Compute x^y, without calling the built-in pow, using exponentiation by
    squaring.
    """
    r = 1
    while y:
        if y & 1:
            r = r * x
        x = x * x
        y >>= 1
    return r
f6d82257ff909d9c890360dcd9408db0d742b13d
12,345
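A quick illustrative check of pow_sq; the values are chosen for demonstration, not part of the original record:

# 3^10 = 59049; y's bits (1010 in binary) select which squarings are multiplied in.
assert pow_sq(3, 10) == 59049
assert pow_sq(2, 0) == 1  # empty bit string -> multiplicative identity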
def get_point(values, pct):
    """
    Pass in array values and return the point at the specified top percent.

    :param values: array: float
    :param pct: float, top percent
    :return: float
    """
    assert 0 < pct < 1, "pct must be strictly between 0 and 1"
    values = sorted(values)
    return values[-int(len(values) * pct)]
11474d804e0845284a864a5cb8de23299999a5e7
12,351
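A brief usage sketch for get_point; the sample data is invented for illustration:

scores = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
# top 20% of 10 values -> index -2 -> the 2nd-largest value
assert get_point(scores, 0.2) == 90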
def check_columns(df):
    """
    Checks whether the dataframe contains the required columns.

    :param df: the data frame with track data.
    :return: whether or not df contains all the required columns.
             required columns: refName, start, end, name, score (is optional)
    :rtype: boolean
    """
    required = ["refName", "start", "end", "name"]
    return all(col in df for col in required)
3ddc53fc0e2caad74b3218f7f47134aed258cba9
12,354
def accuracy_inf_sol(inferred, cliques_solution):
    """
    'inferred' should be a set of vertices.
    'cliques_solution' is an iterable of all the solution cliques (as sets).
    """
    assert len(cliques_solution) != 0, "No solution provided!"
    max_overlap = 0
    best_clique_sol = cliques_solution[0]
    clique_size = len(cliques_solution[0])
    for cur_clique in cliques_solution:
        temp_inter = cur_clique.intersection(inferred)
        cur_overlap = len(temp_inter)
        if cur_overlap > max_overlap:
            max_overlap = cur_overlap
            best_clique_sol = cur_clique
    return max_overlap, clique_size, best_clique_sol
d2d38d7f3470520058f90699dab6b7eb59cbd5cf
12,355
def _ziprange(alist, ix):
    """
    Returns zip of the list, and the one with ix added at the end and first
    element dropped.

    Example
    -------
    ::

        alist = [2, 4, 7]
        _ziprange(alist, 10)
        --> zip([2, 4, 7], [4, 7, 10]) -> (2, 4), (4, 7), (7, 10)
    """
    blist = alist.copy()
    blist.append(ix)
    del blist[0]
    return zip(alist, blist)
e816ad20e8c193487d8483cdfd3ee27d77fef814
12,356
import re


def title_output_replace(input_title_output, metadata_dict, data_dict,
                         rel_channels_dict, is_title=False, custom_vars=None):
    """Substitute %VAR% variables with provided values and constants.

    Given a title (or output path) string template, replace %VAR% variables
    with the appropriate value or constant.

    Variables supported include:

    * %EXPERIMENT_ID% - Experiment ID, sourced from metadata_dict.
    * %INSTRUMENT_SAT% - Instrument/satellite ID, sourced from metadata_dict.
    * %CHANNEL% - Channel number, sourced from metadata_dict.
    * %RELCHANNEL% - Relative channel number, indirectly sourced from
      rel_channels_dict.
    * %FREQUENCY% - Frequency of the selected channel, sourced from the
      frequency field in data_dict.
    * %ASSIMILATION_STATUS% - Placeholder for the assimilation status, which
      is determined from the iuse field in data_dict (not done here).
    * %START_DATE% - Start date in YYYYMMDD format, sourced from metadata_dict.
    * %END_DATE% - End date in YYYYMMDD format, sourced from metadata_dict.
    * Additional custom %VARS% sourced from custom_vars, if specified.

    Args:
        input_title_output (str): The string with the title (or output path)
            template.
        metadata_dict (dict): The metadata dictionary containing data source
            information.
        data_dict (dict): The data dictionary to retrieve values from for
            certain %VAR% variables. See get_data() help (in data.py) for
            more information on its format.
        rel_channels_dict (dict): The relative channels dictionary to map
            relative channels to actual data channels. Its keys are the
            relative channels, and its values are the actual data channels.
            (In this case, the mapping is reversed later to allow easy
            conversion from data channel to relative channel.)
        is_title (bool): Boolean indicating whether the string template is a
            title or not. This affects how and what variables are replaced.
            By default, this is set to False.
        custom_vars (dict): Dictionary containing custom variables to be
            replaced. Its keys are %VAR% variables without the percent sign,
            and its values are what should take their places. For instance,
            given { "TESTVAR": "test123" }, the template "%TESTVAR%" should
            be replaced with "test123". By default, this is set to None -
            this argument is optional if there are no custom variables.

    Returns:
        str: A string with %VAR% variables replaced with the appropriate
        value or constant. Some %VAR% variables may not be replaced if they
        do not exist, or certain conditions are not met.
    """
    # Replace experiment ID variable
    input_title_output = input_title_output.replace(
        "%EXPERIMENT_ID%", metadata_dict["experiment_id"])

    # Capitalize %INSTRUMENT_SAT% if we're using it in a title.
    if is_title:
        input_title_output = input_title_output.replace(
            "%INSTRUMENT_SAT%", metadata_dict["instrument_sat"].upper())
    else:
        input_title_output = input_title_output.replace(
            "%INSTRUMENT_SAT%", metadata_dict["instrument_sat"])

    # Replace data channel variable
    input_title_output = input_title_output.replace(
        "%CHANNEL%", str(metadata_dict["channel"]))

    # Reverse the channel map
    # Original: rel_channel -> actual data channel
    # Inverted: actual data channel -> rel_channel
    rel_channels_inv_map = dict(
        zip(rel_channels_dict.values(), rel_channels_dict.keys()))
    input_title_output = input_title_output.replace(
        "%RELCHANNEL%",
        str(rel_channels_inv_map[metadata_dict["channel"]]))

    # Ensure that we have adequate data to determine frequency.
    # If we do, replace the frequency variable!
    if data_dict and "frequency" in data_dict:
        input_title_output = input_title_output.replace(
            "%FREQUENCY%", str(data_dict["frequency"]))

    # Replace assimilation status placeholder... only if it's a title.
    if is_title:
        input_title_output = input_title_output.replace(
            "%ASSIMILATION_STATUS%", " .......................")

    # Replace date variables
    input_title_output = input_title_output.replace(
        "%START_DATE%",
        str(metadata_dict['start_year']).zfill(4)
        + str(metadata_dict['start_month']).zfill(2)
        + str(metadata_dict['start_day']).zfill(2))
    input_title_output = input_title_output.replace(
        "%END_DATE%",
        str(metadata_dict['end_year']).zfill(4)
        + str(metadata_dict['end_month']).zfill(2)
        + str(metadata_dict['end_day']).zfill(2))

    # Custom vars
    if custom_vars:
        for custom_var in custom_vars:
            # Do a case insensitive replace
            replace_re = re.compile(re.escape('%' + custom_var + '%'),
                                    re.IGNORECASE)
            input_title_output = replace_re.sub(custom_vars[custom_var],
                                                input_title_output)

    return input_title_output
83102c1557643b67ce1c52f0c69154bcfac4ac72
12,358
def format_data(data, es_index):
    """Format data for bulk indexing into elasticsearch."""
    unit = data["unit"]
    rate_unit = data["rateUnit"]
    egvs = data["egvs"]
    docs = []
    for record in egvs:
        record["unit"] = unit
        record["rate_unit"] = rate_unit
        record["@timestamp"] = record.pop("systemTime")
        record.pop("displayTime")
        record["realtime_value"] = record.pop("realtimeValue")
        record["smoothed_value"] = record.pop("smoothedValue")
        record["trend_rate"] = record.pop("trendRate")
        docs.append({"_index": es_index, "_type": "document",
                     "_source": record})
    return docs
094af427daaf4922371e17fca70a2b8c8539d54c
12,359
def paste_filename(search):
    """Create a name for the file to be saved to, based on the search term."""
    # Remove any spaces
    cleaned_keyword = search.replace(' ', '_')
    # Add 'videos.csv' at the end
    filename = cleaned_keyword + "_videos.csv"
    return filename
3279ef21e039b7a63a728a6d02714086a61f3e0e
12,361
import torch


def repeat_column(column: torch.Tensor, times: int) -> torch.Tensor:
    """
    Repeats the given column the given number of times.

    :param column: the column to repeat. Size [H].
    :param times: the number of repetitions = W.
    :return: the given column repeated the given number of times. Size [H, W].
    """
    return column.unsqueeze(1).repeat(1, times)
dfa955fbff0c4b87a7f5cef729a454eb99c93760
12,364
def parse_markers(f):
    """
    Parse markers from mrk file f. Each marker determines a time point and a
    corresponding class label of the movement that was imagined.

    Args:
        f (String) - an mrk file

    Returns:
        tuple of lists of ints - one list for the markers, one for the labels
    """
    mrks = list()
    y = list()
    with open(f) as f_m:
        for line in f_m:
            mrk, cls = line.strip('\n').split('\t')
            mrks.append(int(float(mrk)))
            y.append(int(float(cls)))
    return mrks, y
670ce43529a7aae4f4ed4341938a90eb6e714fb3
12,367
def dp_key(relations):
    """
    Generates a unique key for the dptable dictionary.

    :param relations: set of relations
    :return: str
    """
    return '-'.join(sorted([r.name for r in relations]))
e415778193d5a5c90ba7574bccc7e82d0d95c2e8
12,369
def extract_sectors(pred_df, thres):
    """Extracts labels for sectors above a threshold.

    Args:
        pred_df (df): predicted sector
        thres (float): probability threshold
    """
    long_df = (
        pred_df.reset_index(drop=False)
        .melt(id_vars="index", var_name="division", value_name="probability")
        .query(f"probability > {thres}")
    )
    out = long_df.groupby("index")["division"].apply(lambda x: list(x))
    return out
a772e965685ca8ac8cd6a52861c36e9cf7faf887
12,377
import bisect


def find_closest(numeric_list, query_number):
    """
    Given a list of numbers, and a single query number, find the number in
    the sorted list that is numerically closest to the query number. Uses
    list bisection, so the lookup is O(log n) once the list is sorted (the
    initial sort itself is O(n log n)).
    """
    sorted_numeric_list = sorted(numeric_list)
    pos = bisect.bisect_left(sorted_numeric_list, query_number)
    if pos == 0:
        return sorted_numeric_list[0]
    if pos == len(sorted_numeric_list):
        return sorted_numeric_list[-1]
    before = sorted_numeric_list[pos - 1]
    after = sorted_numeric_list[pos]
    if after - query_number < query_number - before:
        return after
    else:
        return before
ae2dad58162f38f7c1e4d7149943488a96c3c8dc
12,378
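A brief usage sketch for find_closest; the input values are invented for illustration:

readings = [3, 7, 12, 20]
assert find_closest(readings, 10) == 12  # |12 - 10| = 2 beats |10 - 7| = 3
assert find_closest(readings, 5) == 3    # on a tie (7 - 5 == 5 - 3), the smaller value wins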
def count_features_type(types, include_binary=False):
    """
    Counts two or three different types of features
    (binary (optional), categorical, continuous).

    :param types: list of types from get_type
    :returns: a tuple (binary (optional), categorical, continuous)
    """
    if include_binary:
        return (
            types.count('binary'),
            types.count('categorical'),
            types.count('continuous')
        )
    else:
        return (
            types.count('categorical'),
            types.count('continuous')
        )
4158122256c9a407f58987d278657e4a006e6a13
12,386
def is_pandas_module(fullname: str) -> bool:
    """Check if a fully qualified name is from the pandas module."""
    return fullname.startswith("pandas.")
0becdbfd7c1c4f5b7990cbc0466a6e45f25acb14
12,387
def squeeze_first(inputs):
    """Remove the first dimension in case it is singleton."""
    if len(inputs) == 1:
        inputs = inputs[0]
    return inputs
c2c0cabc873baf88ce7673f2c8889fedce0f05da
12,390
def is_part_of_word(word_fragment, wordlist):
    """Returns True if word_fragment is the beginning of a word in wordlist.
    Returns False otherwise. Assumes word_fragment is a string."""
    for word in wordlist:
        if word_fragment == word[:len(word_fragment)]:
            return True
    return False
54f572655fe7bb383cb00d732b57d85156b5f528
12,395
def render_output(data):
    """Print the formatted output for the list."""
    output = ['[Dataduct]: ']
    output.extend(data)
    return '\n'.join(output)
5e3bee31890f682eca6aa03128dbf8d51e2fe473
12,396
def get_parent_doc(__type: type, /) -> str | None:
    """Get the nearest parent documentation using the given
    :py:class:`type`'s mro.

    :return: The closest docstring for an object's class, None if not found.
    """
    doc = None
    for parent in __type.__mro__:
        doc = parent.__doc__
        if doc:
            break
    return doc
efe61d30a82e08ccdf5411ffc9feb4252fdb53e2
12,397
def drop_multiple_col(col_names_list, df):
    """
    AIM    -> Drop multiple columns based on their column names.
    INPUT  -> List of column names, df.
    OUTPUT -> updated df with dropped columns.
    """
    df.drop(col_names_list, axis=1, inplace=True)
    return df
991144349d383b79e1510fa5c106226254f8329b
12,398
def replstring(string, i, j, repl):
    """
    Replace everything in string between and including indices i and j with
    repl.

    >>> replstring("abc", 0, 0, "c")
    'cbc'
    >>> replstring("abc def LOL jkl", 8, 10, "ghi")
    'abc def ghi jkl'
    """
    # Convert to list since strings are immutable
    strlist = list(string)
    # Delete characters between given indices
    for k in range(j - i + 1):
        del strlist[i]  # i instead of k, since deleting an element makes the list smaller
    # Insert new chars
    for l in range(len(repl)):
        strlist = strlist[:i + l] + [repl[l]] + strlist[i + l:]
    return "".join(strlist)
97eee8912a6c8fd9e29a5784af1f3853b714cf0b
12,399
def cli(ctx, workflow_id):
    """Delete a workflow identified by `workflow_id`.

    Output:

        A message about the deletion

    .. warning::
        Deleting a workflow is irreversible - all workflow data will be
        permanently deleted.
    """
    return ctx.gi.workflows.delete_workflow(workflow_id)
9ee3aa82a9577f9b20574f821f4a9e226665740d
12,402
def int2bit(x, w=20):
    """
    Generates a binary representation of an integer number (as a tuple)

    >>> bits = int2bit(10, w=4)
    >>> bits
    (1, 0, 1, 0)
    >>> bit2int(bits)
    10
    """
    bits = []
    while x:
        bits.append(x % 2)
        x //= 2  # integer division; x /= 2 yields floats (and never terminates) in Python 3
    # a bit of padding
    bits = bits + [0] * w
    bits = bits[:w]
    bits.reverse()
    return tuple(bits)
b65cd8f7c6232896eb2aef9f9d69b6ac4fd97bc6
12,405
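The doctest above round-trips through a companion bit2int, which is not included in this record; a minimal sketch of such an inverse (hypothetical, written here only for illustration) could look like:

def bit2int(bits):
    # Fold bits most-significant-first back into an integer.
    x = 0
    for b in bits:
        x = (x << 1) | b
    return x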
def to_tuple(x):
    """Converts lists to tuples.

    For example::

        >>> from networkx.utils import to_tuple
        >>> a_list = [1, 2, [1, 4]]
        >>> to_tuple(a_list)
        (1, 2, (1, 4))
    """
    if not isinstance(x, (tuple, list)):
        return x
    return tuple(map(to_tuple, x))
29586512b336ae5079e991bb13c1ac904e5eefe9
12,408
def mean_residue_ellipticity(phi, n, c, l):
    """
    Calculate mean residue ellipticity (millideg cm2 / decimol) from
    ellipticity (mdeg).

    Args:
        phi (float): an ellipticity (milli deg)
        n (int): the number of residues
        c (float): the molar concentration of the polymer (mol/L)
        l (float): the length of the cuvette (cm)

    Returns:
        a mean residue ellipticity (deg cm2 decimol^{-1} residue^{-1})
    """
    return phi / (10 * l * n * c)
a51a90e3a12b921b2e12fb75160929d60652dcca
12,409
from typing import List, Tuple
import random


def generate_round_robin_matches(bots: List[str]) -> List[Tuple[str, str]]:
    """
    Returns a list of pairs of bots that should play against each other for
    a round robin.
    """
    # This makes the list of matches consistent over multiple calls,
    # e.g. the --list option will always show the same order.
    random.seed(bots[0] + bots[-1])
    # Create all possible pairs of bots with bots from the given list
    matches = []
    count = len(bots)
    for i in range(count):
        for j in range(i + 1, count):
            matches.append((bots[i], bots[j]))
    random.shuffle(matches)
    return matches
1d29e36613210d8d1198fe5943c035c0e435b8dc
12,414
def bin2dec(string_num):
    """Turn binary into decimal."""
    return str(int(string_num, 2))
5c8ba774f1a749947e64a00c86e6cb4054b44d97
12,415
def get_config_dict(robustness_tests, base_config_dict):
    """
    Combines robustness_tests and base_config_dict into a single config_dict.

    Args:
        robustness_tests (dict): robustness test config dict
        base_config_dict (dict): train/data/eval/model/hyperparam config dict

    Returns:
        config_dict (dict): config dict
    """
    config_dict = {}
    if robustness_tests is not None:
        if base_config_dict is not None:
            config_dict = {**robustness_tests, **base_config_dict}
        else:
            config_dict = robustness_tests
    else:
        if base_config_dict is not None:
            config_dict = base_config_dict
    return config_dict
593a2307849fa27b895f18d8b9eacd679eeab04a
12,416
def distance_vector_between(point_1, point_2):
    """Compute and return the vector distance between two points."""
    return [point_2[0] - point_1[0], point_2[1] - point_1[1]]
397d3191cc4c214bb0d4b474db2efe7c63e8a10f
12,417
def split_names(data, names, fosa_types, drop):
    """Separates facility type prefix from facility name.

    inputs:
        data: data frame containing source data from IASO
        names: column name to split
        fosa_types: list of facility types
        drop: list of prefixes indicating row should be dropped from data

    outputs:
        data frame with name column separated into fosa type and fosa name
        columns
    """
    type_pattern = '|'.join(fosa_types)
    data.loc[:, "fosa_type"] = data.loc[:, names].str.extract(
        '(' + type_pattern + ')', expand=True)
    data.loc[:, "fosa_name"] = data.loc[:, names].str.replace(type_pattern, "")
    data = data[~(data[names].isin(drop))]
    data = data[~(data.fosa_name.isin(drop))]
    return data
4842756abd8b332d22548382f56a7c03d43609c2
12,421
def _NamesNotIn(names, mapping):
    """Returns a list of the values in |names| that are not in |mapping|."""
    return [name for name in names if name not in mapping]
ec91dcb6e29b0a9c1aa66f04e1b61d715ded3266
12,422
import re


def natural_sort(l):
    """
    Takes in a list of strings and returns the list sorted in "natural"
    order. (e.g. [test1, test10, test11, test2, test20] ->
    [test1, test2, test10, test11, test20])

    Source: https://stackoverflow.com/questions/4836710/is-there-a-built-in-function-for-string-natural-sort

    Parameters
    ----------
    l : list of str
        Unsorted list of strings

    Returns
    -------
    sorted_l : list of str
        Sorted list of strings
    """
    convert = lambda text: int(text) if text.isdigit() else text.lower()
    alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
    return sorted(l, key=alphanum_key)
7db22ee5f75703f52b25eecd888847eb35590e65
12,423
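A short usage check for natural_sort, mirroring the docstring's own example (list contents assumed):

files = ["test10", "test2", "test1", "test20", "test11"]
assert natural_sort(files) == ["test1", "test2", "test10", "test11", "test20"]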
def get_extensions(list_of_files):
    """Takes a list of Path file objects and adds each file's
    extension/suffix to a set. The set of extensions is returned."""
    extensions = set()
    for file in list_of_files:
        if len(file.suffix) < 6:
            extensions.add((file.suffix).lstrip('.'))
    return extensions
654b893c148535a99dd112dc3711ea9005ae96b7
12,424
def org_rm_payload(org_default_payload):
    """Provide an organization payload for removing a member."""
    rm_payload = org_default_payload
    rm_payload["action"] = "member_removed"
    return rm_payload
e976396f6fe5de1073f2235aac36b300f129867a
12,427
def get_progress_rate(k, c_species, v_reactants):
    """Returns the progress rate for a reaction of the form: va*A + vb*B --> vc*C.

    INPUTS
    =======
    k: float
        Reaction rate coefficient
    c_species: 1D list of floats
        Concentration of all species
    v_reactants: 1D list of floats
        Stoichiometric coefficients of reactants

    RETURNS
    ========
    w: float
        progress rate of this reaction

    NOTES
    =====
    PRE:
        - k, each entry of c_species and v_reactants have numeric type
        - c_species and v_reactants have the same length
    POST:
        - k, c_species and v_reactants are not changed by this function
        - raises a ValueError if k <= 0
        - raises an Exception if c_species and v_reactants have different lengths
        - returns the progress rate w for the reaction

    EXAMPLES
    =========
    >>> get_progress_rate(10, [1.0, 2.0, 3.0], [2.0, 1.0, 0.0])
    20.0
    """
    if k <= 0:
        raise ValueError('k must be positive.')
    if len(c_species) != len(v_reactants):
        raise Exception('List c_species and list v_reactants must have same length.')
    w = k
    for c, v in zip(c_species, v_reactants):
        w *= pow(c, v)
    return w
6baaaa07fe0814dbc50516b29ba55087c4ec23fd
12,431
import json


def get_key(filepath, key, default=None):
    """
    Opens the file and fetches the value at said key.

    :param str filepath: The path to the file
    :param str key: The key to fetch
    :param default: The value to return if no key is found
    :return: The value at the key (or the default value)
    """
    with open(filepath, "r") as f:
        file_content = json.load(f)
    return file_content.get(key, default)
9f720f33373ceec9a9a1a4b46c0e9257d3a55787
12,435
def commonprefix(l):
    """Return the common prefix of a list of strings."""
    if not l:
        return ''
    prefix = l[0]
    for s in l[1:]:
        for i, c in enumerate(prefix):
            # Also shrink the prefix when s is shorter than it; indexing
            # s[i] unguarded would raise an IndexError in that case.
            if i >= len(s) or c != s[i]:
                prefix = s[:i]
                break
    return prefix
235636c207c89a7128295fb5aa5b0cba732f50a1
12,441
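A usage sketch for commonprefix, including the short-string edge case handled above (inputs invented):

assert commonprefix(["flower", "flow", "flight"]) == "fl"
assert commonprefix(["abc", "ab"]) == "ab"  # the shorter string bounds the prefix
assert commonprefix([]) == ""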
def get_qtypes(dataset_name, part):
    """Return list of question-types for a particular TriviaQA-CP dataset."""
    if dataset_name not in {"location", "person"}:
        raise ValueError("Unknown dataset %s" % dataset_name)
    if part not in {"train", "dev", "test"}:
        raise ValueError("Unknown part %s" % part)
    is_biased = part in {"train", "dev"}
    is_location = dataset_name == "location"
    if is_biased and is_location:
        return ["person", "other"]
    elif not is_biased and is_location:
        return ["location"]
    elif is_biased and not is_location:
        return ["location", "other"]
    elif not is_biased and not is_location:
        return ["person"]
    else:
        raise RuntimeError()
0aa1a186ebf4fcfe5820ecbb697d8cb166114310
12,443
import re


def get_numbers_from_file(path, skip_lines=2):
    """
    Function to read a file line-wise and extract numbers.

    Parameters
    ----------
    path: str
        Path to the file including the filename.
    skip_lines: int
        Number of lines to skip at the beginning of the file.

    Returns
    -------
    lst: list
        A list with separated entries for found numbers.
    """
    with open(path, "r") as data_file:
        lst = []
        for string in data_file:
            line = re.findall(
                r"[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?", string
            )
            lst.append(line)
        del lst[0:skip_lines]
        return lst
64026a6c5cf9aa16076a3c8872663ab7996c1add
12,445
def describe_list_indices(full_list):
    """
    Describe the indices of the given list.

    Parameters
    ----------
    full_list : list
        The list of items to order.

    Returns
    -------
    unique_elements : list
        A list of the unique elements of the list, in the order in which
        they first appear.
    element_indices : dict
        A dictionary of lists for each unique element, giving all the
        indices in which they appear in the original list.
    """
    unique_elements = []
    element_indices = {}
    for i in range(len(full_list)):
        item = full_list[i]
        # new item
        if item not in unique_elements:
            unique_elements.append(item)
            element_indices[item] = [i]
        # previously seen item
        else:
            element_indices[item].append(i)
    return unique_elements, element_indices
664bcfd63dd0d5d5114ce24a4c7b2850b61364c5
12,447
def load_metadata_txt(file_path):
    """
    Load distortion coefficients from a text file.

    Parameters
    ----------
    file_path : str
        Path to a file.

    Returns
    -------
    tuple of floats and list
        Tuple of (xcenter, ycenter, list_fact).
    """
    if "\\" in file_path:
        raise ValueError(
            "Please use a file path following the Unix convention")
    with open(file_path, 'r') as f:
        x = f.read().splitlines()
        list_data = []
        for i in x:
            list_data.append(float(i.split()[-1]))
    xcenter = list_data[0]
    ycenter = list_data[1]
    list_fact = list_data[2:]
    return xcenter, ycenter, list_fact
d1220c39fd9e69b76b1aa6f3e73cd9eef708c451
12,448
from collections import OrderedDict


def gen_top(service_uri, no_pages, num_mem, label=None):
    """
    Generate the top level collection page.

    :param service_uri: base uri for the AS paged site.
    :param no_pages: number of pages in the collection.
    :param num_mem: total number of members in the collection.
    :param label: optional label for the collection.
    :return: dict
    """
    top = OrderedDict()
    top['@context'] = [
        "http://iiif.io/api/presentation/2/context.json",
        "https://www.w3.org/ns/activitystreams"
    ]
    top['id'] = service_uri
    top['type'] = 'OrderedCollection'
    if label:
        top['label'] = label
    top['total'] = num_mem
    top['first'] = {'id': service_uri + str(1),
                    'type': 'OrderedCollectionPage'}
    top['last'] = {'id': service_uri + str(no_pages),
                   'type': 'OrderedCollectionPage'}
    return top
14b330e0ad57b08462b3854eba2771e859344df2
12,449
def a_simple_function(a: str) -> str:
    """
    This is a basic module-level function.

    For a more complex example, take a look at `a_complex_function`!
    """
    return a.upper()
a511eb8577e0440d0026c4762628fdb6de9be643
12,454
def paginated_list(full_list, max_results, next_token):
    """
    Returns a tuple containing a slice of the full list starting at
    next_token and ending with at most the max_results number of elements,
    and the new next_token which can be passed back in for the next segment
    of the full list.
    """
    sorted_list = sorted(full_list)
    list_len = len(sorted_list)
    start = sorted_list.index(next_token) if next_token else 0
    end = min(start + max_results, list_len)
    new_next = None if end == list_len else sorted_list[end]
    return sorted_list[start:end], new_next
99a20e3af4ff64d3de52eccbc00aea9fda735e00
12,455
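A walk-through of paginated_list's token protocol; the sample items are assumed for illustration:

items = ["a", "b", "c", "d", "e"]
page, token = paginated_list(items, 2, None)   # (["a", "b"], "c")
page, token = paginated_list(items, 2, token)  # (["c", "d"], "e")
page, token = paginated_list(items, 2, token)  # (["e"], None) -> done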
def celsius_to_kelvin(deg_C):
    """Convert degrees Celsius to Kelvin."""
    return deg_C + 273.15
45eb2dd781adbab2db44f5a8ce5042b2834c7c00
12,456
def params_linear_and_squares(factors):
    """Index tuples for the linear_and_squares production function."""
    names = factors + [f"{factor} ** 2" for factor in factors] + ["constant"]
    return names
8f7505cb2a07e08d3ad2686d52960d631cd7b458
12,461
def tuple_as_atom(atom: tuple) -> str:
    """Return readable version of given atom.

    >>> tuple_as_atom(('a', (3,)))
    'a(3)'
    >>> tuple_as_atom(('bcd', ('bcd', 12)))
    'bcd(bcd,12)'
    """
    assert len(atom) == 2
    return '{}({})'.format(atom[0], ','.join(map(str, atom[1])))
5c18f34733d839865eef35509f95c2d4d198a903
12,465
import struct


def Fbytes(f):
    """Return bytes representation of float."""
    return struct.pack("f", f)
117fb86216ad6983851923ac9dbd0196cc29b92d
12,466
def clamp(value, low, high):
    """Clamp the given value in the given range."""
    return max(low, min(high, value))
48303b27d78f8d532b1e74db052457ae19b9f10d
12,469
def fmt_n(large_number):
    """
    Formats a large number with thousands separator, for printing and
    logging.

    Param large_number (int) like 1_000_000_000

    Returns (str) like '1,000,000,000'
    """
    return f"{large_number:,.0f}"
7b23e902a1c9600f1421b45b27623abaa1930f05
12,470
def inner_product(L1, L2):
    """Take the inner product of the frequency maps."""
    result = 0.
    for word1, count1 in L1:
        for word2, count2 in L2:
            if word1 == word2:
                result += count1 * count2
    return result
65ede8eddcf86d75a1b130a76381416c2a272f61
12,472
def x_point_wgt_av(df_agg, x_var):
    """
    Set the x_point to be the weighted average of x_var within the bucket,
    weighted by stat_wgt.
    """
    if not (x_var + '_wgt_av') in df_agg.columns:
        raise ValueError(
            "\n\tx_point_wgt_av: This method can only be used when"
            "\n\tthe weighted average has already been calculated."
        )
    res = df_agg.assign(
        x_point=lambda df: df[x_var + '_wgt_av']
    )
    return res
15d8515efceb67e1dc9062d7fd79250c5935b549
12,474
from typing import Mapping, Optional


def _rename_nodes_on_tree(
    node: dict,
    name_map: Mapping[str, str],
    save_key: Optional[str] = None,
) -> dict:
    """Given a tree, a mapping of identifiers to their replacements, rename
    the nodes on the tree. If `save_key` is provided, then the original
    identifier is saved using that as the key."""
    name = node["name"]
    renamed_value = name_map.get(name, None)
    if renamed_value is not None:
        # we found the replacement value! first, save the old value if the
        # caller requested.
        if save_key is not None:
            node[save_key] = name
        node["name"] = renamed_value
    for child in node.get("children", []):
        _rename_nodes_on_tree(child, name_map, save_key)
    return node
b8d3df2f2247b27c614b767b28eda7b91e380524
12,478
def ignore(name):
    """
    Files to ignore when diffing.

    These are packages that we're already diffing elsewhere, or files that
    we expect to be different for every build, or known problems.
    """
    # We're looking at the files that make the images, so no need to search them
    if name in ['IMAGES']:
        return True
    # These are packages of the recovery partition, which we're already diffing
    if name in ['SYSTEM/etc/recovery-resource.dat',
                'SYSTEM/recovery-from-boot.p']:
        return True
    # These files are just the BUILD_NUMBER, and will always be different
    if name in ['BOOT/RAMDISK/selinux_version',
                'RECOVERY/RAMDISK/selinux_version']:
        return True
    # b/26956807 .odex files are not deterministic
    if name.endswith('.odex'):
        return True
    return False
d5f64616480fa14c03b420165d2c89040a8cc768
12,481
def power(work, time):
    """
    Power is the rate at which work is done: the amount of work done
    divided by the time it takes to do the work.

    Parameters
    ----------
    work : float
    time : float

    Returns
    -------
    float
    """
    return work / time
48eb476658fa19a6002b428993bfa58c81634638
12,482
def find(*patterns):
    """Decorate a function to be called for each time a pattern is found in a line.

    :param str patterns: one or more regular expression(s)

    Each argument is a regular expression which will trigger the function::

        @find('hello', 'here')
        # will trigger once on "hello you"
        # will trigger twice on "hello here"
        # will trigger once on "I'm right here!"

    This decorator can be used multiple times to add more rules::

        @find('here')
        @find('hello')
        # will trigger once on "hello you"
        # will trigger twice on "hello here"
        # will trigger once on "I'm right here!"

    If the Sopel instance is in a channel, or sent a ``PRIVMSG``, the
    function will execute for each time a received message matches an
    expression. Each match will also contain the position of the instance it
    found.

    Inside the regular expression, some special directives can be used.
    ``$nick`` will be replaced with the nick of the bot and ``,`` or ``:``,
    and ``$nickname`` will be replaced with the nick of the bot::

        @find('$nickname')
        # will trigger for each time the bot's nick is in a trigger

    .. versionadded:: 7.1

    .. note::

        The regex rule will match once for each non-overlapping match, from
        left to right, and the function will execute for each of these
        matches.

        To match only once from anywhere in the line, use the :func:`search`
        decorator instead. To match only once from the start of the line,
        use the :func:`rule` decorator instead.
    """
    def add_attribute(function):
        function._sopel_callable = True
        if not hasattr(function, "find_rules"):
            function.find_rules = []
        for value in patterns:
            if value not in function.find_rules:
                function.find_rules.append(value)
        return function

    return add_attribute
884005008791baba3a9949e9d2c02aee0f985552
12,484
import re


def validate_container_name(name):
    """Make sure a container name conforms to the naming convention.

    https://docs.openstack.org/developer/swift/api/object_api_v1_overview.html
    https://lists.launchpad.net/openstack/msg06956.html

    > Length of container names / Maximum value 256 bytes / Cannot contain
    > the / character.
    """
    validate_name = re.compile('^[^/]+$')
    return (
        len(name) <= 256 and bool(validate_name.match(name))
    )
5bef8b304c004dc3169b6984b49a0d669fc9b7b3
12,490
def function(values):
    """A simple fitness function, evaluating the sum of squared parameters."""
    return sum([x ** 2 for x in values])
ee96b0948e43eec1e86ffeeca681567d2a0afa53
12,491
def create_redis_compose_node(name):
    """
    Args:
        name(str): Name of the redis node

    Returns:
        dict: The service configuration for the redis node
    """
    return {
        "container_name": name,
        "image": "redis:3.2.8",
        "command": "redis-server --appendonly yes",
        "deploy": {
            "placement": {
                "constraints": ["node.role == worker"]
            }
        },
        "volumes": ["./volumes/{:s}/:/data/".format(name)]
    }
e6d01acc8b0c5c324ad3e0f6e5f527e2a6433705
12,493
def sun_rot_elements_at_epoch(T, d):
    """Calculate rotational elements for Sun.

    Parameters
    ----------
    T: float
        Interval from the standard epoch, in Julian centuries, i.e. 36525 days.
    d: float
        Interval in days from the standard epoch.

    Returns
    -------
    ra, dec, W: tuple (float)
        Right ascension and declination of north pole, and angle of the
        prime meridian.
    """
    ra = 286.13
    dec = 63.87
    W = 84.176 + 14.1844000 * d
    return ra, dec, W
9a74edc686869eebd851200687fe4d10d38d550a
12,497
def NameAndAttribute(line):
    """
    Split name and attribute.

    :param line: DOT file name
    :return: name string and attribute string
    """
    split_index = line.index("[")
    name = line[:split_index]
    attr = line[split_index:]
    return name, attr
7595f51d728c5527f76f3b67a99eccd82fb9e8b7
12,500
import copy

import torch


def clones(module: torch.nn.Module, n: int):
    """
    Produce N identical copies of module in a ModuleList.

    :param module: The module to be copied.
        The module itself is not part of the output module list
    :param n: Number of copies
    """
    return torch.nn.ModuleList([copy.deepcopy(module) for _ in range(n)])
e4807cda87c90af415555606e3308f07ff9ddf49
12,504
def is_sorted(array):
    """
    Check if array of numbers is sorted.

    :param array: [list] of numbers
    :return: [boolean] - True if array is sorted and False otherwise
    """
    for i in range(len(array) - 1):
        if array[i] > array[i + 1]:
            return False
    return True
8542e162aa96f7035d33f1eb7b7159031a8a41b8
12,506
def FormatDescriptorToPython(i):
    """
    Format a descriptor into a form which can be used as a python attribute.

    example::

        >>> FormatDescriptorToPython('(Ljava/lang/Long; Ljava/lang/Long; Z Z)V')
        'Ljava_lang_LongLjava_lang_LongZZV'

    :param i: name to transform
    :rtype: str
    """
    i = i.replace("/", "_")
    i = i.replace(";", "")
    i = i.replace("[", "")
    i = i.replace("(", "")
    i = i.replace(")", "")
    i = i.replace(" ", "")
    i = i.replace("$", "")
    return i
8d217883603ae9e9c8f282985b456aa97494beba
12,507
def layer_severity(layers, layer):
    """Return severity of layer in layers."""
    return layers[layer]['severity']
e8a7a95268ddd2d4aa6b5f7fa66d5828016517eb
12,509
def _parse_alt_title(html_chunk):
    """
    Parse title from alternative location if not found where it should be.

    Args:
        html_chunk (obj): HTMLElement containing slice of the page with details.

    Returns:
        str: Book's title.
    """
    title = html_chunk.find(
        "input",
        {"src": "../images_buttons/objednat_off.gif"}
    )
    assert title, "Can't find alternative title!"
    title = title[0]
    assert "title" in title.params, "Can't find alternative title source!"
    # title is stored as "Bleh bleh: Title"
    title = title.params["title"].split(":", 1)[-1]
    return title.strip()
bd5df0edfc174653731256d50e0a8f84a6f84880
12,512
def prompt_worker_amount(cpu_cores: int):
    """Prompt the user for the amount of Celery workers they want to run.
    Start 2 fewer workers than the number of CPU cores by default."""
    answer = 0
    safe_cores_suggestion = cpu_cores - 2

    def invalid_answer():
        """Restart prompt if answer invalid"""
        print(f"[red]Invalid number! Please enter a whole number.[/]")
        prompt_worker_amount(cpu_cores)

    try:
        # Input doesn't like parsing colours
        print(
            f"[yellow]How many workers would you like to start?[/]\n"
            + f"Press ENTER for default: {safe_cores_suggestion}\n"
        )
        answer = int(input() or safe_cores_suggestion)
    except ValueError:
        invalid_answer()

    if answer == 0:
        print(f"[yellow]Using suggested amount: {safe_cores_suggestion}[/]")
        answer = safe_cores_suggestion

    return answer
2f2e4d423eb94d0bf709feda09c5c45ca8cc481a
12,513
def tab_delimited(list_to_write):
    """Format list of elements into a tab-delimited line."""
    list_to_write = [str(x) for x in list_to_write]
    return '\t'.join(list_to_write) + '\n'
921a3316f01aecaba1efb43c524b1ad5cca89546
12,523
def get_sorted_indices(some_list, reverse=False):
    """Get sorted indices of some_list.

    Parameters
    ----------
    some_list : list
        Any list compatible with sorted()
    reverse : bool
        Reverse sort if True
    """
    return [
        i[0] for i in sorted(
            enumerate(some_list), key=lambda x: x[1], reverse=reverse
        )
    ]
6e207f4079cd3800fd26269725f5385250b76112
12,529
def excel_index(column):
    """
    Takes a string and returns what column number it would be in Excel.
    """
    # Not needed, but ensures the right values. There are different
    # ASCII values for uppercase and lowercase letters.
    letters = list(column.upper())
    # This works like this: LOL = 8514
    # 26^2 * 12(L) + 26^1 * 15(O) + 26^0 * 12(L) = 8514
    rev = list(reversed(letters))
    return sum([26 ** i * (ord(x) - 64) for i, x in enumerate(rev)])
224d7f370468e232a68a8205cf101e8dcb959829
12,531
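A couple of illustrative checks for excel_index; the values can be verified by hand against the worked example in the comments:

assert excel_index("A") == 1
assert excel_index("AA") == 27     # 26*1 + 1
assert excel_index("LOL") == 8514  # matches the comment's worked example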
def gp_tuple_to_dict(gp_tuple):
    """Convert a groupings parameters (gp) tuple into a dict suitable to
    pass to the ``grouping_parameters`` CompoundTemplate.__init__ kwarg."""
    params = [{'min': 1}, {'max': 1}, {'name': None},
              {'possible_types': None}, {'is_separator': False},
              {'inner_sep_type': None}]
    d = {}
    for i, param in enumerate(params):
        if i < len(gp_tuple):
            d[list(param.keys())[0]] = gp_tuple[i]
        else:
            d[list(param.keys())[0]] = list(param.values())[0]
    return d
5db98d0530dc177685e0bcfefad94a1f6718aed9
12,537
from typing import Optional


def convert_int_or_none(val: Optional[int]) -> Optional[int]:
    """Convert to an int or None."""
    return int(val) if val is not None else val
1920243d7465df6f2f3f9e6fdbd2424bbad165b4
12,539
def parse_object(repo, objectish):
    """Parse a string referring to an object.

    :param repo: A `Repo` object
    :param objectish: A string referring to an object
    :return: A git object
    :raise KeyError: If the object can not be found
    """
    return repo[objectish]
6606eb58a1aab94a6071bf18221dbec63e2ef6da
12,545
def elision_normalize(s):
    """Turn apostrophe-like Unicode characters into U+2019 (right single
    quotation mark)."""
    return (s.replace("\u02BC", "\u2019")
             .replace("\u1FBF", "\u2019")
             .replace("\u0027", "\u2019")
             .replace("\u1FBD", "\u2019"))
a7b0f2ba14d0fcbb2cd1cc97b8ce858051d35709
12,548
def constrain_to_range(s, min_val, max_val):
    """
    Make sure that a value lies in the given (closed) range.

    :param s: Value to check.
    :param min_val: Lower boundary of the interval.
    :param max_val: Upper boundary of the interval.
    :return: Point closest to the input value which lies in the given range.
    :rtype: float
    """
    return max(min(s, max_val), min_val)
d2017580bab60ba444cbf40f570b16763de81969
12,550
def find_piece_size(total_size):
    """
    Determine the ideal piece size for a torrent based on the total size of
    the data being shared.

    :param total_size: Total torrent size
    :type total_size: int
    :return: Piece size (KB)
    :rtype: int
    """
    if total_size <= 2 ** 19:
        return 512
    elif total_size <= 2 ** 20:
        return 1024
    elif total_size <= 2 ** 21:
        return 2048
    elif total_size <= 2 ** 22:
        return 4096
    elif total_size <= 2 ** 23:
        return 8192
    elif total_size <= 2 ** 24:
        return 16384
    else:
        raise ValueError("Total size is unreasonably large")
532adc41448ce5dfb11f02699c987936e1abda0a
12,555
def fake_file(filename, content="mock content"):
    """
    For testing, I sometimes want a specific file request to return specific
    content. This helper makes creation easier.
    """
    return {"filename": filename, "content": content}
5b342edf9dec65987223fbbc8b670402513ae4ed
12,557
def flatten(sequence):
    """Given a sequence possibly containing nested lists or tuples, flatten
    the sequence to a single non-nested list of primitives.

    >>> flatten((('META.INSTRUMENT.DETECTOR', 'META.SUBARRAY.NAME'),
    ...          ('META.OBSERVATION.DATE', 'META.OBSERVATION.TIME')))
    ['META.INSTRUMENT.DETECTOR', 'META.SUBARRAY.NAME', 'META.OBSERVATION.DATE', 'META.OBSERVATION.TIME']
    """
    flattened = []
    for elem in sequence:
        if isinstance(elem, (list, tuple)):
            elem = flatten(elem)
        else:
            elem = [elem]
        flattened.extend(elem)
    return flattened
6ca3fe470757dc4081c4387d917d5e285c2a3f06
12,560
def relevant_rule(rule):
    """Returns true if a given rule is relevant when generating a podspec."""
    return (
        # cc_library only (ignore cc_test, cc_binary)
        rule.type == "cc_library"
        # ignore empty rule
        and (rule.hdrs + rule.textual_hdrs + rule.srcs)
        # ignore test-only rule
        and not rule.testonly
    )
3e1a45d222128e0065eb585135806a0f8bb787d9
12,561
def _decrement_version(lambda_config):
    """Decrement the Lambda version, if possible.

    Args:
        lambda_config (dict): Lambda function config with 'current_version'

    Returns:
        True if the version was changed, False otherwise
    """
    current_version = lambda_config['current_version']
    if current_version == '$LATEST':
        return False

    int_version = int(current_version)
    if int_version <= 1:
        return False

    lambda_config['current_version'] = int_version - 1
    return True
a06ed14e0abaa68a809bdb49c2d4f2cc59ce6db2
12,564
import collections


def rollout(env, agent, max_steps):
    """Collects a single rollout of experience.

    Args:
        env: The environment to interact with (adheres to gym interface).
        agent: The agent acting in the environment.
        max_steps: The max number of steps to take in the environment.

    Returns:
        A dictionary of lists containing information from the trajectory.
    """
    assert max_steps > 0
    traj = collections.defaultdict(list)

    def add_step(**kwargs):
        for k, v in kwargs.items():
            traj[k].append(v)

    s = env.reset()
    num_steps = 0
    while num_steps < max_steps:
        a, a_info = agent.step(s)
        sp, r, t, _ = env.step(a)
        add_step(s=s, a=a, r=r, t=t, a_info=a_info)
        s = sp
        num_steps += 1
        if t:
            break

    # Handle certain edge cases during sampling.
    # 1. Ensure there's always a next state.
    traj["s"].append(s)
    # 2. Ensure that the agent info (importantly containing the
    #    next-state-value) always exists.
    _, a_info = agent.step(s)
    traj["a_info"].append(a_info)

    return traj
eb9a9f41b9c37e1c5f8ebdbbec13650dd7665622
12,566
def tms_mpsse(bits):
    """Convert a tms bit sequence to an mpsse (len, bits) tuple."""
    n = len(bits)
    assert (n > 0) and (n <= 7)
    x = 0
    # tms is shifted lsb first
    for i in range(n - 1, -1, -1):
        x = (x << 1) + bits[i]
    # only bits 0 thru 6 are shifted on tms - tdi is set to bit 7
    # (and is left there)
    # len = n means clock out n + 1 bits
    return (n - 1, x & 127)
25d2dc3b1edd5494e82295fe50889565246c2ae5
12,567
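A worked example for tms_mpsse; the bit sequence is chosen for illustration:

# The TMS sequence [1, 0, 1] packs LSB-first into 0b101 = 5;
# the returned length field is n - 1 = 2 (the MPSSE engine clocks len + 1 bits).
assert tms_mpsse([1, 0, 1]) == (2, 5)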
def _aggregate(query, func, by=None):
    """
    Wrap a query in an aggregation clause.

    Use this convenience function if the aggregation parameters are coming
    from user input so that they can be validated.

    Args:
        query (str): Query string to wrap.
        func (str): Aggregation function of choice. Valid choices are
            'avg'/'mean', 'min', 'max', 'sum'.
        by (list of str): Optional list of variables by which to perform the
            aggregation.

    Returns:
        str: New query string.
    """
    if func == "mean":
        func = "avg"
    if func not in ["avg", "min", "max", "sum"]:
        raise ValueError("Unsupported aggregation function %r" % func)
    query = "{func}({query})".format(func=func, query=query)
    if by:
        query += " by({by_variables})".format(by_variables=", ".join(by))
    return query
e26aa715fadc5a58f5f87cee297fc3e6500120e1
12,568
from typing import Any


def accept(message: Any) -> bool:
    """
    Prompts the user to enter "yes" or "no". Returns True if the response
    was "yes", otherwise False. Ctrl-c counts as "no".
    """
    message = f"[pretf] {message} [yes/no]: "
    response = ""
    while response not in ("yes", "no"):
        try:
            response = input(message).lower()
        except KeyboardInterrupt:
            response = "no"
            print()
    return response == "yes"
884bf321462ef37f02a69925ece012e108fad861
12,569
def format_seconds(total_seconds: int) -> str:
    """Format a count of seconds to get a [H:]M:SS string."""
    prefix = '-' if total_seconds < 0 else ''
    hours, rem = divmod(abs(round(total_seconds)), 3600)
    minutes, seconds = divmod(rem, 60)
    chunks = []
    if hours:
        chunks.append(str(hours))
        min_format = '{:02}'
    else:
        min_format = '{}'
    chunks.append(min_format.format(minutes))
    chunks.append('{:02}'.format(seconds))
    return prefix + ':'.join(chunks)
c0f79b7f45c32589537b5dbf51a95b4811c50417
12,570
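A few illustrative cases for format_seconds; the values are picked for demonstration:

assert format_seconds(75) == "1:15"
assert format_seconds(3661) == "1:01:01"  # with hours, minutes pad to two digits
assert format_seconds(-61) == "-1:01"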
import six


def rev_comp(seq, molecule='dna'):
    """DNA|RNA seq -> reverse complement"""
    if molecule == 'dna':
        nuc_dict = {"A": "T", "B": "V", "C": "G", "D": "H", "G": "C",
                    "H": "D", "K": "M", "M": "K", "N": "N", "R": "Y",
                    "S": "S", "T": "A", "V": "B", "W": "W", "Y": "R"}
    elif molecule == 'rna':
        nuc_dict = {"A": "U", "B": "V", "C": "G", "D": "H", "G": "C",
                    "H": "D", "K": "M", "M": "K", "N": "N", "R": "Y",
                    "S": "S", "U": "A", "V": "B", "W": "W", "Y": "R"}
    else:
        raise ValueError("rev_comp requires molecule to be dna or rna")
    if not isinstance(seq, six.string_types):
        raise TypeError("seq must be a string!")
    return ''.join([nuc_dict[c] for c in seq.upper()[::-1]])
2e42ccf5f37992d0fbe3a25afd70a04e6fc0c225
12,572
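A quick sanity check for rev_comp; the sequences are invented for illustration:

assert rev_comp("ATGC") == "GCAT"  # reverse the sequence, then complement each base
assert rev_comp("AUGC", molecule="rna") == "GCAU"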
def findall_deep(node, selector, ns, depth=0, maxDepth=-1):
    """
    Recursively find all nodes matching the xpath selector.

    :param node: the input etree node
    :param selector: the xpath selector
    :param ns: a dict of namespaces
    :param depth: the current depth
    :param maxDepth: the maximum number of levels to navigate
    :return: a list of matching nodes
    """
    results = node.findall(selector, ns) if ns else node.findall(selector)
    if maxDepth == -1 or (depth < maxDepth):
        children = list(node)
        if children:
            for child in children:
                results = results + findall_deep(child, selector, ns,
                                                 depth + 1, maxDepth)
    return results
f93d413dd205acf5be0e5f76dd6c599f54bfac57
12,577
from typing import List, Union


def table_insert(name: str, field_names: Union[str, List[str]]) -> str:
    """Return command to add a record into a PostgreSQL database.

    :param str name: name of table to append
    :param field_names: names of fields
    :type: str or list
    :return: command to append records to a table
    :rtype: str

    Example:

        import psql

        cur = psql.connection('db', 'user', 'password')
        [cur.execute(psql.table_insert('table', 'field'), (x, ))
         for x in values]
    """
    if isinstance(field_names, str):
        field_names = [field_names]
    length = len(field_names)
    if length > 1:
        values = ','.join(['%s'] * length)
    else:
        values = '%s'
    return '''INSERT INTO {table_name} ({fields}) VALUES ({values});'''.format(
        table_name=name, fields=', '.join(field_names), values=values)
a50aadebe655118c255ccb81c3c0852646057ff4
12,578
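What the generated SQL looks like for a multi-field call; the table and field names below are illustrative assumptions:

sql = table_insert("users", ["name", "age"])
# -> "INSERT INTO users (name, age) VALUES (%s,%s);"
# The %s placeholders are filled by the DB driver at execute() time,
# which avoids SQL injection from the inserted values.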
def __normalize(variable):
    """
    Scale a variable to mean zero and standard deviation 1.

    Parameters
    ----------
    variable : xarray.DataArray or np.ndarray

    Returns
    -------
    xarray.DataArray or np.ndarray
    """
    mean = variable.mean()
    std = variable.std()
    if std != 0:
        result = (variable - mean) / std
    else:
        result = variable - mean
    return result
7d6329ef6454deb04b041a630a7f1f084f237b57
12,580
def get_proj(ds):
    """Read projection information from the dataset."""
    # Use geopandas to get the proj info
    proj = {}
    maybe_crs = ds.geometry.crs
    if maybe_crs:
        maybe_epsg = maybe_crs.to_epsg()
        if maybe_epsg:
            proj["proj:epsg"] = maybe_epsg
        else:
            proj["proj:wkt2"] = maybe_crs.to_wkt()
    return proj
4ed68d8733285cdea92c1b167a3d7f59024845db
12,584
def nameFormat(name):
    """
    Edits the name of the column so that it is properly formatted with a
    space between the words, and each word capitalized.
    """
    space = name.find("l") + 1  # split point: just after the first "l"
    firsthalf = name[:space]
    secondhalf = name[space:]
    name = firsthalf.capitalize() + " " + secondhalf.capitalize()
    return name
d93b54d6a18347aeb32657c1f8880965d01db7f2
12,585
def invert_cd_path(graph, path, c, d):
    """
    Switch the colors of the edges on the cd-path: c to d and d to c.

    :param graph: nx.Graph(); each edge should have an attribute "color"
    :param path: nx.Graph() representing cd-path
    :param c: integer smaller than the degree of "graph" or None;
        represents a color
    :param d: integer smaller than the degree of "graph" or None;
        represents a color
    :return: graph with switched colors
    """
    for edge in path.edges:
        current_color = graph.get_edge_data(*edge)["color"]
        if current_color == c:
            graph.add_edge(*edge, color=d)
        if current_color == d:
            graph.add_edge(*edge, color=c)
    return graph
53455f3e442a10ee403d499fe9c5b6f2e86a6e7f
12,586
def _initialise_notable_eids(db):
    """Returns set of eids corresponding to "notable" entities."""
    rows = db.query("""
        SELECT eid FROM entity_flags
        WHERE political_entity=TRUE;
    """)
    notable_eids = set(row["eid"] for row in rows)
    print('[OK] Received %d notable eIDs.' % len(notable_eids))
    return notable_eids
c11306395937fc58dadbd3e9d129f7f6d7b4b576
12,589