Columns: content (string, lengths 39 to 14.9k), sha1 (string, length 40), id (int64, range 0 to 710k)
def grant_url(focus_area, month):
    """Use the focus area and donation date information to get the original grant URL."""
    fa = {
        "community-development": 13,
        "education-youth": 28,
        "religion": 11,
    }[focus_area]
    month_param = month[2:]
    return f"https://lillyendowment.org/for-current-grantees/recent-grants/?fa={fa}&date={month_param}"
6ae83240afbec368038aba07b3f37d44ea0d1afe
27,049
def parse_srg(srg_filename):
    """Reads a SeargeRG file and returns a dictionary of lists for packages,
    classes, methods and fields"""
    srg_types = {'PK:': ['obf_name', 'deobf_name'],
                 'CL:': ['obf_name', 'deobf_name'],
                 'FD:': ['obf_name', 'deobf_name'],
                 'MD:': ['obf_name', 'obf_desc', 'deobf_name', 'deobf_desc']}
    parsed_dict = {'PK': [], 'CL': [], 'FD': [], 'MD': []}

    def get_parsed_line(keyword, buf):
        return dict(zip(srg_types[keyword], [i.strip() for i in buf]))

    with open(srg_filename, 'r') as srg_file:
        for buf in srg_file:
            buf = buf.strip()
            if buf == '' or buf[0] == '#':
                continue
            buf = buf.split()
            parsed_dict[buf[0][:2]].append(get_parsed_line(buf[0], buf[1:]))
    return parsed_dict
ac33b56fc52831a80f1e38261b38c47407fd1bfb
27,050
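A minimal usage sketch for the parse_srg entry above (id 27,050); the sample records and the file name sample.srg are hypothetical:

# Write a tiny hypothetical .srg file, one "PK:/CL:/FD:/MD:" record per line, then parse it.
sample = (
    "PK: a net/example/pkg\n"
    "CL: b net/example/pkg/Widget\n"
    "MD: b/run ()V net/example/pkg/Widget/run ()V\n"
)
with open("sample.srg", "w") as fh:
    fh.write(sample)
parsed = parse_srg("sample.srg")
print(parsed["CL"])  # [{'obf_name': 'b', 'deobf_name': 'net/example/pkg/Widget'}]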
import random


def choose(bot, trigger):
    """.choice option1|option2|option3 - Makes a difficult choice easy."""
    if not trigger.group(2):
        return bot.reply('I\'d choose an option, but you didn\'t give me any.')
    choices = [trigger.group(2)]
    for delim in '|\\/, ':
        choices = trigger.group(2).split(delim)
        if len(choices) > 1:
            break
    choices = [choice.strip() for choice in choices]
    pick = random.choice(choices)
    # Always use a comma in the output
    display_options = ', '.join(
        choice if ',' not in choice else '"%s"' % choice
        for choice in choices
    )
    return bot.reply('Your options: %s. My choice: %s' % (display_options, pick))
ad4fd246e9db86d45a7a6cb06ef5b9813c0a0d5f
27,051
from typing import List


def primes(max: int) -> List[int]:
    """
    Return a list of all prime numbers up to max.

    >>> primes(10)
    [2, 3, 5, 7]
    >>> primes(11)
    [2, 3, 5, 7, 11]
    >>> primes(25)
    [2, 3, 5, 7, 11, 13, 17, 19, 23]
    >>> primes(1_000_000)[-1]
    999983
    """
    max += 1
    numbers = [False] * max
    ret = []
    for i in range(2, max):
        if not numbers[i]:
            # i is prime; mark all of its multiples as composite.
            for j in range(i, max, i):
                numbers[j] = True
            ret.append(i)
    return ret
1fbdda28414f9a846d2ea1dc945af619df6aeea7
27,055
def current_user(self):
    """Get current user"""
    return self.handler.current_user
790a3478390dbb5439380ef1a35b6bc67f89721a
27,056
import torch
from typing import Tuple


def _split_crossval(xy: torch.Tensor, crossval_count: int,
                    crossval_index: int) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Generates a split of the given dataset along the first dimension for cross-validation.

    :param xy: The data that should be split. The split will be generated across dimension 0.
    :param crossval_count: The number of splits in total
    :param crossval_index: The index of the split that should be generated (0 <= crossval_index < crossval_count)
    :return: A tuple of (training data, validation data)
    """
    n = xy.shape[0]
    split_size = n // crossval_count
    val_start = crossval_index * split_size
    val_end = (crossval_index + 1) * split_size
    # Training part 1 is everything before the validation fold.
    train1_start = 0
    train1_end = 0 if crossval_index == 0 else val_start
    # Training part 2 is everything after the validation fold.
    train2_start = val_end if crossval_index < (crossval_count - 1) else 0
    train2_end = n if crossval_index < (crossval_count - 1) else 0
    val = xy[val_start:val_end]
    train = torch.concat([xy[train1_start:train1_end], xy[train2_start:train2_end]])
    return (train, val)
211146bdac3396da475b6c6b74d990031a537af7
27,061
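A quick shape sketch for _split_crossval above, on an arbitrary 9-row tensor:

data = torch.arange(18, dtype=torch.float32).reshape(9, 2)
train, val = _split_crossval(data, crossval_count=3, crossval_index=1)
print(train.shape, val.shape)  # torch.Size([6, 2]) torch.Size([3, 2])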
from typing import Optional
from typing import Any
from typing import List
from typing import Iterable


def as_list(value: Optional[Any]) -> List[Any]:
    """Normalizes the value input as a list.

    >>> as_list(None)
    []
    >>> as_list("foo")
    ['foo']
    >>> as_list(123)
    [123]
    >>> as_list(["foo", "bar", 123])
    ['foo', 'bar', 123]
    >>> as_list(("foo", "bar", 123))
    ['foo', 'bar', 123]
    >>> as_list(range(5))
    [0, 1, 2, 3, 4]
    >>> def gen():
    ...     yield 1
    ...     yield 2
    >>> as_list(gen())
    [1, 2]
    """
    if value is None:
        return []
    if isinstance(value, str):
        return [value]
    if not isinstance(value, Iterable):
        return [value]
    return list(value)
54f13890437dfafd779583a3bbdc42ae769312f4
27,062
def unique_proportion(_metrics):
    """Computes the proportion of unique non-null values out of all non-null values"""
    total_values = _metrics.get("table.row_count")
    unique_values = _metrics.get("column.distinct_values.count")
    null_count = _metrics.get("column_values.nonnull.unexpected_count")

    # Ensuring that we do not divide by 0, returning 0 if all values are nulls
    # (we only consider non-null unique values)
    if total_values > 0 and total_values != null_count:
        return unique_values / (total_values - null_count)
    else:
        return 0
df44cc537623404d4de949ffc8bb5acf4fb62d48
27,077
import math


def signed_distance_point_to_line(a, b, c, p):
    """
    Signed distance between a point p and a line defined by a, b and c.

    a, b, c: the line $ax + by + c = 0$
    p: the point
    """
    d1 = (a * p[0] + b * p[1] + c)
    d2 = math.sqrt(math.pow(a, 2.) + math.pow(b, 2.))
    # d = abs(d1) / d2  # unsigned variant
    d = d1 / d2
    return d
f06b67e62b940e259963b1a092650936dc8e9fb0
27,081
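A worked check of the signed distance above, using the line x - 2 = 0 (a=1, b=0, c=-2):

print(signed_distance_point_to_line(1, 0, -2, (5, 1)))  # 3.0 (positive side)
print(signed_distance_point_to_line(1, 0, -2, (0, 1)))  # -2.0 (negative side)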
import yaml


def validate_spackyaml(filename):
    """Ensure that a spack.yaml file has a spack or env directive"""
    try:
        with open(filename, "r") as fd:
            data = yaml.load(fd, Loader=yaml.FullLoader)
        if "env" not in data and "spack" not in data:
            return False
        return True
    except yaml.YAMLError:
        return False
c9bdf6de87d9945d106c2312087441f84584daef
27,082
def ResetStNum(N=0):
    """Reset the state numbering counter."""
    global NXTSTATENUM
    NXTSTATENUM = N
    return NXTSTATENUM
7b3c49221da9110c498bc0b1dc88d230e5d65018
27,083
import binascii


def bytes_str(val):
    """
    Converts bytes into hex string.

    Returns:
        hex string
    """
    assert isinstance(val, (bytes, bytearray))
    return binascii.hexlify(val).decode('ascii')
dfe6d5192b639f656a56918f5ab570f8edd96dd7
27,084
def pull_key(key_fun):
    """Return a new dict with members of objs as values and values generated
    by key_fun as keys.

    pull_key(key_fun)(objs) :: Hashable K => (X -> K) -> Seq[X] -> {K : X}

    Equivalent to the following in Python 3:
        {key_fun(v): v for v in objs}

    >>> from operator import itemgetter
    >>> objs = [{'id': 1, 'name': 'Fred'}, {'id': 3, 'name': 'Wilma'}]
    >>> result = pull_key(itemgetter('id'))(objs)
    >>> sorted(result.keys())
    [1, 3]
    >>> result[1]['id'], result[1]['name']
    (1, 'Fred')
    >>> result[3]['id'], result[3]['name']
    (3, 'Wilma')
    """
    def pull_key_fun(objs):
        return dict((key_fun(value), value) for value in objs)
    return pull_key_fun
3b2288ec0eee6f164896d7b97a31aaec9cb52422
27,091
import re


def word_tokenizer(sentence):
    """
    Split a sentence into a list of words, stripping punctuation along the way.

    Parameters
    ----------
    sentence : string

    Returns
    -------
    words : the list of words
    """
    solve_sentence = re.sub(r'\W+|\s+', ' ', sentence)
    words = solve_sentence.lower().strip(' ').split(' ')
    return words
835e78d4a4a35beb54e232a1d44a67332b3c267a
27,093
def flatten(list_of_lists: list) -> list:
    """Given a list of lists, flatten all the items into one list.

    >>> flatten([[1, 2, 3], [4, 5, 6]])
    [1, 2, 3, 4, 5, 6]
    """
    return [val for sublist in list_of_lists for val in sublist]
093d3bfbb90c043414fdfb18da79334e8910b150
27,095
def timeseries_train_test_split(df, test_size=0.2, gap=0, ascending=True):
    """Split DataFrame or matrices into sequential train and test subsets for timeseries

    Parameters
    ----------
    df : pd.DataFrame, shape (n_samples, n_features)
        Training data, where n_samples is the number of samples and
        n_features is the number of features.
    test_size : float, optional (default=0.2)
        Should be between 0.0 and 1.0 and represent the proportion of the
        dataset to include in the test split.
    gap : int, default=0
        Represents the absolute number of the dropped samples between
        training set and test set.
    ascending : boolean, default=True, optional
        Whether timeseries is ascending.

    Returns
    -------
    t : list, length=2
        List of two lists containing the train and test portions of the index.
    """
    assert gap >= 0, "`gap` should be greater than or equal to 0."
    t = df.index.tolist()
    if not ascending:
        t.reverse()
    train_len = round(len(t) * (1 - test_size)) - gap
    return [t[0:train_len], t[train_len + gap:]]
8a11f2b0ca1c60dc7c9a92419a6f5a2ab8994815
27,097
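A small illustration of the time-series split above on a hypothetical 10-row frame:

import pandas as pd

df = pd.DataFrame({"y": range(10)})
train_idx, test_idx = timeseries_train_test_split(df, test_size=0.2, gap=1)
print(train_idx)  # [0, 1, 2, 3, 4, 5, 6]
print(test_idx)   # [8, 9] - index 7 is dropped by the gap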
def list_to_string(l):
    """
    Converts a string list into a string

    @param l is the string list
    @returns a concatenation of each element in the list
    """
    res = ""
    for elem in l:
        res += elem
    return res
ef30e847703f56453c79a2c51ba61497f26b4963
27,101
def is_aware(time_point):
    """
    Return whether ``time_point`` is aware of the timezone
    """
    return time_point.tzinfo is not None and \
        time_point.utcoffset() is not None
9169af11d30bb1fca2b4dbc7708dfee7e44e858f
27,104
import logging
from pathlib import Path


def get_filelist(paths: list[str], recursive: bool = False) -> list[str]:
    """Get a list of files (but not directories) from a path.

    Args:
        paths (list[str]): The path(s) to search for files.
        recursive (bool, optional): Whether to recursively search the path.
            Defaults to False.

    Returns:
        list[str]: A list of all filenames, given as absolute paths.
    """
    logger = logging.getLogger("checkr")
    filelist = []
    for path in paths:
        dir = Path(path)
        if not dir.exists():
            logger.error(f"The directory '{dir}' does not exist.")
        elif not dir.is_dir():
            logger.error(f"'{dir}' is not a directory.")
        else:
            results = dir.rglob("*") if recursive else dir.glob("*")
            filelist.extend([x for x in results if x.is_file()])
    return filelist
9b73c1d0845385279a90e860a7666aea2342912c
27,109
import re


def size2integer(value):
    """Try to convert a string representing a size to an integer value
    in bytes.

    Supported formats:
    * K|k for KB
    * M|m for MB
    * G|g for GB

    :param value: the string to convert
    :return: the corresponding integer value
    """
    # The unit must be letters only, so a purely numeric string falls
    # through to the int() branch below.
    m = re.match(r"(\d+)\s*([a-zA-Z]+)", value)
    if m is None:
        if re.match(r"\d+", value):
            return int(value)
        return 0
    if m.group(2)[0] in ["K", "k"]:
        return int(m.group(1)) * 2 ** 10
    if m.group(2)[0] in ["M", "m"]:
        return int(m.group(1)) * 2 ** 20
    if m.group(2)[0] in ["G", "g"]:
        return int(m.group(1)) * 2 ** 30
    return 0
f7771bd9fd8904d03a7ab75da5939e01d4a126d7
27,110
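A few sample conversions through size2integer above (given the letters-only unit regex noted in the code):

print(size2integer("512"))   # 512
print(size2integer("10K"))   # 10240
print(size2integer("2 M"))   # 2097152
print(size2integer("1g"))    # 1073741824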
def find_domain_zone_matches(domains, zones):
    """
    Finds matches between Amazon SES verified domains and Route 53 hosted zones.
    Subdomain matches are taken when found, otherwise root domain matches are
    taken.

    :param domains: The list of domains to match.
    :param zones: The list of hosted zones to match.
    :return: The set of matched domain-zone pairs. When a match is not found,
             the domain is included in the set with a zone value of None.
    """
    domain_zones = {}
    for domain in domains:
        domain_zones[domain] = None
        # Start at the most specific sub-domain and walk up to the root domain
        # until a zone match is found.
        domain_split = domain.split('.')
        for index in range(0, len(domain_split) - 1):
            sub_domain = '.'.join(domain_split[index:])
            for zone in zones:
                # Normalize the zone name from Route 53 by removing the trailing '.'.
                zone_name = zone['Name'][:-1]
                if sub_domain == zone_name:
                    domain_zones[domain] = zone
                    break
            if domain_zones[domain] is not None:
                break
    return domain_zones
d6a0ed7bd974c411aeb4d4571a23565b7c21ca59
27,111
def format_seconds(s):
    """Format a seconds value into a human-readable form"""
    years, s = divmod(s, 31556952)
    mins, s = divmod(s, 60)
    h, mins = divmod(mins, 60)
    d, h = divmod(h, 24)
    return '%sy, %sd, %sh, %sm, %ss' % (years, d, h, mins, s)
f004d9f2cef8b3b9eee967ebd9d4811cbc80ae6c
27,113
def sanitize_string(input_string):
    """Removes unwanted characters from a string and returns the result"""
    return input_string.replace('"', '')
596bd7bf4810ce9ef2b96c6d6fead947bd4e22ec
27,116
def parse_distro_info_path(path):
    """Break distro_info path into repo + file"""
    path = path.strip().rsplit("/", 1)
    info_repo = path[0]
    info_file = path[1]
    remote = False
    if info_repo.startswith("http"):
        remote = True
    return info_file, info_repo, remote
44f221485e04b6e3203ac3863a10d06fcbbe1555
27,123
import math


def volatilization_rate(k, mass, dz, dt):
    """
    Compute the transfer of the volatilization (kg/s)

    source: ("CHEMMAP technical User's manual 6.10," 2014)

    params
    ------
    k : volatilization coefficient [m/s]
    dz : vertical diffusivity [m²/s]
    dt : length of a timestep [s]
    mass : mass of the pollutant [kg]
    """
    return k * mass / math.sqrt(2 * dz * dt)
69ebb196ecb9a860adcb46b5775e90a85c2ca06c
27,125
def _FlattenToScoresList(config_param_score_dict):
    """Extracts a list of scores from input data structure.

    Args:
        config_param_score_dict: of the form {'capture_name':
            {'param_name': score_value, ...} ...}

    Returns:
        Plain list of all score values present in the input data structure
    """
    result = []
    for capture_name in config_param_score_dict:
        result += list(config_param_score_dict[capture_name].values())
    return result
188e784f59711bbe46f89c4c8171c3a0551a0cf3
27,129
def filter_dataset(dataset, filter_indices):
    """
    Filter a dataset consisting of a dictionary for the different variables
    by a vector of True/False indicating if the data point (a row index)
    should be kept or not. Returns a copy to leave the original dataset
    untouched.
    """
    dataset_copy = dataset.copy()
    for key in dataset_copy.keys():
        dataset_copy[key] = dataset_copy[key][filter_indices, :]
    return dataset_copy
256878262c879f4b914693a7333236539f062bc6
27,131
from typing import Mapping


def check_dict_nested_attrs(item: Mapping, dict_data: Mapping) -> bool:
    """Checks the values from `dict_data` are contained in `item`

    >>> d = {'a': 1, 'b': {'c': 2}}
    >>> check_dict_nested_attrs(d, {'a': 1})
    True
    >>> check_dict_nested_attrs(d, {'b': {'c': 2}})
    True
    >>> check_dict_nested_attrs(d, {'d': []})
    False
    """
    for key, value in dict_data.items():
        if key not in item:
            return False
        item_value = item[key]
        if isinstance(item_value, Mapping):
            if not check_dict_nested_attrs(item_value, value):
                return False
        elif item_value != value:
            return False
    return True
08ed8dbc405e236b95e33e10e9c342e15b6363c9
27,132
import itertools


def is_almost_simplicial(G, n):
    """Determines whether a node n in G is almost simplicial.

    Parameters
    ----------
    G : NetworkX graph
        The graph on which to check whether node n is almost simplicial.
    n : node
        A node in graph G.

    Returns
    -------
    is_almost_simplicial : bool
        True if all but one of its neighbors induce a clique

    Examples
    --------
    This example checks whether node 0 is simplicial or almost simplicial for
    a :math:`K_5` complete graph with one edge removed.

    >>> K_5 = nx.complete_graph(5)
    >>> K_5.remove_edge(1, 3)
    >>> dnx.is_simplicial(K_5, 0)
    False
    >>> dnx.is_almost_simplicial(K_5, 0)
    True
    """
    for w in G[n]:
        if all(u in G[v] for u, v in itertools.combinations(G[n], 2)
               if u != w and v != w):
            return True
    return False
a1ffdb2b23e6f0d49cb6e65220523eb0553b1a74
27,135
import csv


def create_csv_table(table_path, rows_list):
    """Create csv file from list of lists.

    Args:
        table_path (str): file path to table.
        rows_list (list): nested list of elements to write to table.

    Returns:
        table_path (str): filepath to table.
    """
    with open(table_path, 'w') as f:
        writer = csv.writer(f)
        writer.writerows(rows_list)
    return table_path
f46f1295c408925adac0b2e97e63200036ffd50f
27,141
def processReducedResult(key_value_tuple):
    """
    input:  (TW-100917-125717-1537514430, 4.03)
    output: [TW, 100917, 125717, 1537514430, 4.03]
    """
    values = key_value_tuple[0].split("-")
    values.append(str(key_value_tuple[1]))
    return values
22ad7e6f5381ccf983ad159f190a7e1686ab2558
27,142
def _split_csv(string):
    """Split string into a list, excluding empty strings"""
    if string is None:
        return []
    return [n.strip() for n in string.split(',') if n]
de6b21c340ec4c24462f3120f3486e974feafb9d
27,151
import random


def selectNRandom(nodes, N):
    """
    Selects N random nodes from a list of nodes and returns the list
    """
    random.shuffle(nodes)
    return nodes[:N]
4d4a330136a8b56b4ee1943db318df13a3be3c3f
27,152
def globify_time_descriptors(path):
    """Convert strftime time descriptors to a glob-compatible expression.

    For example, convert '%Y' to '[12][0-9][0-9][0-9]'. Then the glob
    expression can be used to match path names containing a four digit year.
    """
    path = path.replace('%Y', '[12][0-9][0-9][0-9]')
    path = path.replace('%y', '[0-9][0-9]')
    path = path.replace('%m', '[01][0-9]')
    path = path.replace('%d', '[0-3][0-9]')
    path = path.replace('%H', '[0-2][0-9]')
    path = path.replace('%M', '[0-5][0-9]')
    path = path.replace('%S', '[0-5][0-9]')
    return path
b28d47c903742def7bb9517102f7df3b989fb1ae
27,157
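A quick demonstration of the strftime-to-glob conversion above; the log path layout is hypothetical:

pattern = globify_time_descriptors("logs/%Y/%m/app-%d.log")
print(pattern)  # logs/[12][0-9][0-9][0-9]/[01][0-9]/app-[0-3][0-9].log
# The result can be passed directly to glob.glob(pattern).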
def MakeTuple(object_):
    """
    Returns the given object as a tuple; if it is not one, creates one with it inside.

    @param: Any object or tuple
        The object to tupleIZE
    """
    if isinstance(object_, tuple):
        return object_
    else:
        return (object_,)
119e17785bcf5c7b8dae48c422d12dc501c32d57
27,159
def reorder_exons(exon_ids):
    """
    Reorder exons if they were out of order.

    Parameters:
        exon_ids (list of str): List of exons 'chrom_coord1_coord2_strand_exon'

    Returns:
        exons (list of str): List of same exon IDs ordered based on strand
            and genomic location
    """
    strand = exon_ids[0].split('_')[-2]
    coords = [int(i.split('_')[-4]) for i in exon_ids]
    exons = sorted(zip(exon_ids, coords), key=lambda x: x[1])
    exons = [i[0] for i in exons]
    if strand == '-':
        exons.reverse()
    return exons
d3f52a24d4da1a05a1deceaf38927622c141a9ad
27,161
def get_reaction_from_name(reactions, name):
    """Return reaction with name.

    Args:
        reactions (list(reactions)): the reactions.
        name (str): name of the desired reaction.

    Returns:
        reaction: the corresponding reaction.
    """
    matches = [r for r in reactions if r.name == name]
    if len(matches) > 1:
        raise UserWarning('ERROR: duplicate reactions in model.')
    elif len(matches) == 0:
        raise UserWarning('WARNING: could not find reaction: ' + name)
    else:
        return matches[0]
d27da8428d7a9f8e07ede7cf9e4574d2c012e480
27,166
def extract_sub_ids(file, naive=False, label_of_interest=0):
    """
    Extracts sub_ids from dataset_x.txt files of the following format:
    SubjectID,Network,Station,Latitude,Longitude,PercentageEarthquakes,
    PercentageTremor,PercentageNoise,PercentageUnclearEvent

    Args:
        file (path): Path to file containing the dataset info
        naive (bool): If naive = True, apply naive selection based on
            threshold (no reliability score)
        label_of_interest (int): Only used if naive flag is set to true

    Returns (list): List containing extracted subject ids
    """
    sub_ids = []
    threshold = 70.0
    # Map labels to indices according to headers
    offset_dict = {0: 5, 1: 7, 2: 6, 3: 7}
    label_index = offset_dict[label_of_interest]
    with open(file, 'r') as f:
        for index, line in enumerate(f.readlines()):
            # Don't consider the headers
            if index > 2:
                info = line.split(",")
                if not naive:
                    sub_ids.append(int(info[0]))
                elif naive and float(info[label_index]) > threshold:
                    sub_ids.append(int(info[0]))
    return sub_ids
a51daa18ec2c1799dc9e07e59008088290977bb3
27,168
def format_api_output(response):
    """Format retrieved json from API call"""
    data = response["items"][0]
    return {
        "rep": data["reputation"],
        "gold": data["badge_counts"]["gold"],
        "silver": data["badge_counts"]["silver"],
        "bronze": data["badge_counts"]["bronze"],
    }
1f914f8f16ac7c623ae554ac567399037752575b
27,169
def relativize_paths(root, paths):
    """
    Take a list of fully-qualified paths and remove the root, thus making
    them relative to something.
    """
    clean_paths = []
    for path in paths:
        clean_paths.append(path.replace("%s/" % root, ''))
    return clean_paths
12487e109b281fd886c37f4673e22ffad4d6fe21
27,183
def runner(app):
    """Using test app, create and return a CLI runner object."""
    return app.test_cli_runner()
67284844e1e2be40dd3dbbed58b04fdbf295a83c
27,184
def _is_descendant(index, target):
    """Return True if ``target`` is a descendant of ``index`` (i.e. ``index``
    is an ancestor of ``target``)."""
    while target.isValid():
        if target == index:
            return True
        target = target.parent()
    return False
f97dd888fa9506a57bc81730f2e449b834aafa96
27,187
def pybtex_unescape(string: str) -> str:
    """
    Reverts the escaping applied by Pybtex.

    :param string: Input string with pybtex-escaped characters.
    :return: Output string where this has been reverted.
    """
    return string.replace('\\_', '_').replace('\\textasciitilde ', '~')
0c8312ac82c360282369a4e4a05ea9b5899550d8
27,194
def isPointInsideRect(x, y, rect):
    """
    This function determines if the point x, y is within the rectangle rect.
    Returns True if it is and False if it is not.
    """
    return ((x > rect.left) and (x < rect.right)
            and (y > rect.top) and (y < rect.bottom))
984855f6fa85912b323806ebba3b98c7c6aae441
27,195
import torch


def zero_loss(*args, **kwargs):
    """Dummy loss that always returns zero.

    Parameters
    ----------
    args : list
        Can take any number of positional arguments (without using them).
    kwargs : dict
        Can take any number of keyword arguments (without using them).

    Returns
    -------
    loss : torch.tensor
        torch.tensor(0)
    """
    return torch.tensor(0)
2c4821e6fb1e443258f7fc482f2630c16dedb3a2
27,197
def get_attribute_from_tag(tag, attribute):
    """Returns an XML attribute from a given tag, or None if it is missing"""
    element = None
    try:
        element = tag.attrib[attribute]
    except KeyError:
        pass  # attribute was not defined in this tag
    return element
914e04f2e6441ffb5f42d71de49bd99fe5e3092b
27,207
def fib(n: int) -> int:
    """Returns the n'th Fibonacci number.

    Note that this function uses zero style indexing, meaning that the first
    Fibonacci number has an index of zero (n=0)."""
    f_p, f_c = 0, 1
    for _ in range(n):
        f_n = f_c + f_p
        f_p = f_c
        f_c = f_n
    return f_c
431c0ba58b2dfb25eca03c14890d7b6ecab88f0d
27,213
def get_project_ids(config, modeling_application):
    """Get a list of the ids of models in the source database for a
    particular modeling application (e.g., XPP)

    Args:
        config (:obj:`dict`): configuration
        modeling_application (:obj:`str`): modeling application (e.g., ``XPP``)

    Returns:
        :obj:`list` of :obj:`int`: ids of projects
    """
    response = config['source_session'].get(
        config['source_api_endpoint']
        + '/models?modeling_application={}'.format(modeling_application))
    response.raise_for_status()
    projects = response.json()
    projects.sort()
    return projects
c3ebe7f74a1eb27dbbcd316002ad91d2ebaa4bf6
27,216
def construct_filter_based_on_destination(reimbursable_destination_type: str):
    """
    Construct Filter Based on Destination
    :param reimbursable_destination_type: Reimbursable Destination Type
    :return: Filter
    """
    filters = {}
    if reimbursable_destination_type == 'EXPENSE_CATEGORY':
        filters['destination_expense_head__isnull'] = True
    elif reimbursable_destination_type == 'ACCOUNT':
        filters['destination_account__isnull'] = True
    return filters
73116ce3e6398e98c1540380094e70cc1620867b
27,221
import random


def natural(values, classes=5, maxsize=1000, samples=3):
    """
    Jenks Optimal (Natural Breaks) algorithm implemented in Python.

    The original Python code comes from here:
    http://danieljlewis.org/2010/06/07/jenks-natural-breaks-algorithm-in-python/
    and is based on a JAVA and Fortran code available here:
    https://stat.ethz.ch/pipermail/r-sig-geo/2006-March/000811.html

    Returns class breaks such that classes are internally homogeneous while
    assuring heterogeneity among classes.

    For very large datasets (larger than maxsize), will calculate only on a
    subsample to avoid exponential runtimes. Calculated multiple times
    (samples) and takes the average break values for better consistency.
    Lower and higher bounds are kept intact.
    """
    # values = sorted(values)  # maybe not needed as is already done in main.py

    # if too few values, just return breakpoints for each unique value, ignoring classes
    if len(values) <= classes:
        return list(values) + [values[-1]]

    def getbreaks(values, classes):
        # the original algorithm by Carson Farmer
        mat1 = []
        for i in range(0, len(values) + 1):
            temp = []
            for j in range(0, classes + 1):
                temp.append(0)
            mat1.append(temp)
        mat2 = []
        for i in range(0, len(values) + 1):
            temp = []
            for j in range(0, classes + 1):
                temp.append(0)
            mat2.append(temp)
        for i in range(1, classes + 1):
            mat1[1][i] = 1
            mat2[1][i] = 0
            for j in range(2, len(values) + 1):
                mat2[j][i] = float('inf')
        v = 0.0
        for l in range(2, len(values) + 1):
            s1 = 0.0
            s2 = 0.0
            w = 0.0
            for m in range(1, l + 1):
                i3 = l - m + 1
                val = float(values[i3 - 1])
                s2 += val * val
                s1 += val
                w += 1
                v = s2 - (s1 * s1) / w
                i4 = i3 - 1
                if i4 != 0:
                    for j in range(2, classes + 1):
                        if mat2[l][j] >= (v + mat2[i4][j - 1]):
                            mat1[l][j] = i3
                            mat2[l][j] = v + mat2[i4][j - 1]
            mat1[l][1] = 1
            mat2[l][1] = v
        k = len(values)
        kclass = []
        for i in range(0, classes + 1):
            kclass.append(0)
        kclass[classes] = float(values[len(values) - 1])
        kclass[0] = float(values[0])
        countNum = classes
        while countNum >= 2:
            id = int((mat1[k][countNum]) - 2)
            kclass[countNum - 1] = values[id]
            k = int((mat1[k][countNum] - 1))
            countNum -= 1
        return kclass

    # Automatic sub sampling for large datasets.
    # The idea of using random sampling for large datasets was in the original code.
    # However, since these samples tend to produce different results,
    # to produce more stable results we might as well calculate the
    # breaks several times and use the sample means for the final break values.
    if len(values) > maxsize:
        allrandomsamples = []
        for _ in range(samples):
            randomsample = sorted(random.sample(values, maxsize))
            # include lower and higher bounds to ensure the whole range is considered
            randomsample[0] = values[0]
            randomsample[-1] = values[-1]
            # get sample break
            tempbreaks = getbreaks(randomsample, classes)
            allrandomsamples.append(tempbreaks)
        # get average of all sampled break values
        jenksbreaks = [sum(allbreakvalues) / float(len(allbreakvalues))
                       for allbreakvalues in zip(*allrandomsamples)]
    else:
        jenksbreaks = getbreaks(values, classes)
    return jenksbreaks
8ba4085847bb1dbb70433a0af1491c83dccc39d9
27,225
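A small usage sketch for the natural() breaks function above; the input is assumed pre-sorted, as the commented-out sort inside the function suggests:

data = sorted([1, 2, 4, 5, 7, 9, 12, 31, 34, 36, 40, 78, 80, 84, 90])
breaks = natural(data, classes=3)
print(len(breaks))            # 4 - always classes + 1 boundary values
print(breaks[0], breaks[-1])  # 1.0 90.0 - bounds are kept intact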
def calc_url_by_from_date(url, period1_sec, period2_sec):
    """This function takes the url and the start and end periods and inserts
    them in the relevant place in the url"""
    prefix = url.split('history?', 1)[0] + 'history?'
    periods = 'period1=' + str(period1_sec) + '&period2=' + str(period2_sec)
    suffix = '&' + url.split('&', 2)[2]
    return prefix + periods + suffix
404c7a7259c2edf1dbc565ea21b8158d33b8b1ba
27,228
def containsNonAlphaNum(word_list):
    """
    Does list of words contain any special characters?

    Parameters:
        word_list: list of words

    Returns:
        bool: whether any word in the list contains a special character
    """
    chars = ["-", "_", "."]
    allow_chars = set(chars)
    for word in word_list:
        for char in word:
            if not char.isalnum() and char not in allow_chars:
                return True
    return False
7a87c706eaec5cd4ee10fa0ccc027a4850430eb3
27,230
def graph_codes(dataframe, code):
    """A function that takes the dataframe and a NAICS code (string), and
    graphs both the original and the LQ of the code using hvplot. The only
    necessary arguments are dataframe and code.
    """
    return (dataframe.hvplot(c=code, tiles='OSM', title=code, alpha=0.6)
            + dataframe.hvplot(c='LQ_' + code, tiles='OSM',
                               title='LQ_' + code, alpha=0.6))
ad403862e136f83dd4e334cd0cdf47fa8cc5df4c
27,231
def _get_user_id_from_session(session):
    """
    Get user's identifier from the session. It could be their id or username
    since both are unique.
    """
    user_sess_id = session.session_token.get("sub")
    if user_sess_id:
        try:
            user_sess_id = int(user_sess_id)
        except ValueError:
            # if we can't cast to an int, don't. could be a username
            pass
    return user_sess_id
759249581ea54660968958ceed1dd29c5cf1c247
27,232
def str_fsize(sz):
    """Format a file size as a string (e.g., '1.2 MB')"""
    if sz < 1024:
        return '%.1f bytes' % sz
    sz /= 1024.0
    if sz < 1024:
        return '%.1f KB' % sz
    sz /= 1024.0
    if sz < 1024:
        return '%.1f MB' % sz
    sz /= 1024.0
    if sz < 1024:
        return '%.1f GB' % sz
    sz /= 1024.0
    return '%.1f TB' % sz
6e42389ab96678d595c3c0bd11441e6fa5c96810
27,234
def get_position_of_one(min: float, max: float) -> float:
    """
    Maps the value 1.0 in the range min-max to the range 0-1

    :param min: minimum
    :param max: maximum
    :return: position of 1.0 within min-max, scaled to 0-1
    """
    mm = max - min
    return (1.0 - min) / mm
773810b54e8e84b108185468c6e1e9c416ee08ce
27,235
def vector(a, b):
    """Return the vector from a to b"""
    return b - a
8d55f178bf67c7dfd48d1e8def8af798c6593203
27,239
def decode_integer_big_endian_4(frame, start):
    """
    Decode a 4-byte big-endian integer from a byte array.

    Args:
        frame: Source byte array
        start: Read start index

    Returns:
        Decoded integer value
    """
    return ((frame[start] << 24) | (frame[start + 1] << 16)
            | (frame[start + 2] << 8) | frame[start + 3])
00b7809b3f759d57628f1f70af801f6f7f4ccb41
27,245
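A worked check of the 4-byte big-endian decode above:

frame = bytes([0x00, 0x01, 0x02, 0x03])
# (0x00 << 24) | (0x01 << 16) | (0x02 << 8) | 0x03 = 65536 + 512 + 3
print(decode_integer_big_endian_4(frame, 0))  # 66051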
def adda(a, b, c=0, d=0, e=0):
    """Add number b to number a.

    Optionally also add any of numbers c, d, e to the result.
    """
    print(f"Function `adda` called with arguments a={a} b={b}", end="")
    if c:
        print(f" c={c}", end="")
    if d:
        print(f" d={d}", end="")
    if e:
        print(f" e={e}", end="")
    print()
    return a + b + c + d + e
50a1a923f2dd046114bf92caffe9ba770d062f43
27,249
def _looks_like_url(name):
    """
    Function tries to determine if the input argument looks like a URL and
    not like an S3 bucket name.

    :param name: input name
    :return: True, if name looks like a URL; False otherwise.
    """
    if name.endswith('.com'):
        return True
    if name.find(':') >= 0:  # Assume it is a port number
        return True
    return False
d65eff615612fd087ec48a847347be6a5cefef91
27,251
import pickle


def load_from_file(fn):
    """
    Load data previously saved by save_to_file.

    :param fn: file name.
    :return: the loaded data.
    """
    with open(fn, "rb") as fh:
        return pickle.loads(fh.read())
b25a8063b0e7d77bf2725b2657fc7e65473317bd
27,253
def multi_replace(s, rep_dict):
    """
    Replace multiple substrings in a string.

    Parameters
    ----------
    s : string
        The string to be modified
    rep_dict : dict
        The replacement patterns, {old: new}

    Returns
    -------
    s : string
        The replaced string
    """
    for pattern in rep_dict:
        s = s.replace(pattern, rep_dict[pattern])
    return s
01ce4d83b12d60252b01681adc89c12a2f8b3e91
27,257
def generate_gene(base, indexes) -> str:
    """
    Takes a base string and indices of where the base string is to be added
    to the cumulative string, and returns the constructed gene
    """
    gene = base
    for i in indexes:
        gene = gene[:i + 1] + gene + gene[i + 1:]
    return gene
a5cdca4d70e111003defe8286733ccc36325a66d
27,263
import math


def get_meansd(values):
    """Returns mean and standard deviation of the specified values."""
    if not values:
        return float('nan'), float('nan')
    mean = sum(values) / len(values)
    sd = math.sqrt(sum([(v - mean) ** 2 for v in values]) / len(values))
    return mean, sd
77f69a86a886f3ea7f814f91a02a362fee2e3588
27,265
def get_short_object_id(cid):
    """return shortened contribution ID (ObjectId) for `cid`.

    >>> get_short_object_id('5a8638add4f144413451852a')
    '451852a'
    >>> get_short_object_id('5a8638add4f1400000000000')
    '5a8638a'
    """
    length = 7
    cid_short = str(cid)[-length:]
    if cid_short == '0' * length:
        cid_short = str(cid)[:length]
    return cid_short
8e39f877f3b8914401d898aa5423b4fa2360aacf
27,272
def _make_scales(notes):
    """Utility function used by Sound class for building the note
    frequencies table"""
    res = dict()
    for note, freq in notes:
        freq = round(freq)
        for n in note.split('/'):
            res[n] = freq
    return res
21ebc91750f231fa3d25f10f59d55c921940db4b
27,273
import torch


def interp(x: torch.Tensor, xp: torch.Tensor, fp: torch.Tensor) -> torch.Tensor:
    """Interpolate ``x`` tensor according to ``xp`` and ``fp`` as in ``np.interp``.

    This implementation cannot reproduce numpy results identically, but is
    reasonable. Code referred to `here
    <https://github.com/pytorch/pytorch/issues/1552#issuecomment-926972915>`_.

    Args:
        x: the input tensor that needs to be interpolated.
        xp: the x-coordinates of the referred data points.
        fp: the y-coordinates of the referred data points, same length as ``xp``.

    Returns:
        The interpolated values, same shape as ``x``.
    """
    if not (x.dim() == xp.dim() == fp.dim() == 1):
        raise ValueError(
            f"Required 1D vector across ``x``, ``xp``, ``fp``. Got {x.dim()}, {xp.dim()}, {fp.dim()}."
        )
    slopes = (fp[1:] - fp[:-1]) / (xp[1:] - xp[:-1])
    locs = torch.searchsorted(xp, x)
    locs = locs.clip(1, len(xp) - 1) - 1
    # y = fp[i] + slope_i * (x - xp[i]); the intercept comes from fp, the y-values.
    return slopes[locs] * (x - xp[locs]) + fp[locs]
b18c373d8d1bfc8da736a0322d6abcc41af3cde0
27,279
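A sanity check of the torch interp above on a simple linear ramp:

xp = torch.tensor([0.0, 1.0, 2.0])
fp = torch.tensor([0.0, 10.0, 20.0])
x = torch.tensor([0.5, 1.5])
print(interp(x, xp, fp))  # tensor([ 5., 15.]), matching np.interp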
def get_pr_labels(pull):
    """Gets PR labels as set"""
    pr_labels = set()
    for label in pull.labels:
        pr_labels.add(label.name)
    return pr_labels
be807aeddddfd642412ba9e1455085af1c67ca7d
27,282
def all_image_ids(images):
    """Given images data, return a dict of all image IDs therein.

    :param images: images data from Twistlock, decoded from json
    :return: A dict whose keys are the IDs of the images, and values are the
             tag for that image (if present)
    """
    results = {}
    for image in images:
        info = image['info']
        results[info['id']] = info['repoTag']['tag']
    return results
829614ea917521b6292754a6aa29be0486360a31
27,284
def addNodeMI(nm_dict, node, value):
    """
    Add motif information to node dictionary.

    Input:
        nm_dict: (dictionary) the dictionary of node motif degree
        node: (int) the id of node
        value: (int) the change value of node
    Output:
        nm_dict: (dictionary) changed node motif degree dictionary
    """
    if node not in nm_dict:
        nm_dict[node] = value
    else:
        nm_dict[node] += value
    return nm_dict
67fe86aefcf8c54feaa72e07a15f2be65b916fc1
27,286
def dict_without_keys(d, *omitkeys):
    """
    Returns a copy of a dict without the specified keys

    Args:
        d (dict): A dict to omit keys from
        *omitkeys: Variable length list of keys to omit

    Returns:
        dict: A dict with omitted keys
    """
    return {key: d[key] for key in d.keys() if key not in omitkeys}
75b04a1ef3f2848e390b065f2df12eedec8919ea
27,290
def get_root_nodes(config_yaml):
    """
    List the root nodes defined in the given taxonomy config.

    Args:
        config_yaml: configuration content

    Returns:
        list of root nodes
    """
    root_nodes = []
    for root_node in config_yaml['Root_nodes']:
        root_nodes.append(root_node['Node'])
    return root_nodes
8f336e87ec0688ab15ca91c032f6921175ab021b
27,292
def hard_dedupe(s):
    """Takes in a string, and returns a string where only the first
    occurrence of each letter is retained. So the string 'abccba' goes to
    'abc'. The empty string returns the empty string."""
    seen = set()
    ans = ''
    for char in s:
        if char in seen:
            continue
        seen.add(char)
        ans += char
    return ans
58104d46d19c1de713fc86ff08ea36383dc5598e
27,294
import re


def standardize_whitespace(string: str) -> str:
    """
    Removes leading, trailing, and repeated whitespace from a given *string*.
    """
    return re.sub(r"\s+", " ", string.strip())
499f361101d45eea82928dafef8b18a87d910841
27,297
def get_compatibility_list_from_parser(parser):
    """
    Extract the docstring information (if any) about compatible versions

    :param parser: a parser function
    :return: list of versions
    """
    docstring = parser.__doc__
    if docstring is None:
        return None
    lines = docstring.split('\n')
    for line in lines:
        if 'compatibility' in line.lower():
            try:
                return [version.strip() for version in line.split(':')[1].split(',')]
            except IndexError:
                continue
    return None
25f83a29f684739c9989ffa03119ca3f880514f3
27,309
def map_SWAS_var2GEOS_var(var, invert=False):
    """Map variable names from SWAS to GEOS variable names"""
    d = {
        # '1_3_butadiene':,
        # '1_butene':,
        # '2_3_methylpentane':,
        # '224_tmp':,
        'acetaldehyde': 'ALD2',
        'acetone': 'ACET',
        # 'acetylene':,
        'benzene': 'BENZ',  # GEOSChem, but not GEOS-CF output
        # 'benzenechb':,
        'cis_2_butene': 'PRPE',  # NOTE: lumped tracer for >= C3 alkenes
        'cyclo_pentane': 'ALK4',  # NOTE: lumped tracer for >= C4 Alkanes
        'dms': 'DMS',  # GEOSChem, but not GEOS-CF output
        # 'dmschb':,
        'ethane': 'C2H6',
        # 'ethene':,
        # 'ethylbenzene':,
        # 'extra_1':,
        # 'extra_2':,
        # 'extra_3':,
        # 'extra_4':,
        # 'extra_5':,
        # 'extra_l2':,
        'iso_butane': 'ALK4',  # NOTE: lumped tracer for >= C4 Alkanes
        'iso_butene': 'PRPE',  # NOTE: lumped tracer for >= C3 alkenes
        'iso_pentane': 'ALK4',  # NOTE: lumped tracer for >= C4 Alkanes
        'isoprene': 'PRPE',  # NOTE: lumped tracer for >= C3 alkenes
        'methanol': 'MOH',
        'mp_xylene': 'XYLE',
        'n_butane': 'ALK4',  # NOTE: lumped tracer for >= C4 Alkanes
        'n_heptane': 'ALK4',  # NOTE: lumped tracer for >= C4 Alkanes
        'n_hexane': 'ALK4',  # NOTE: lumped tracer for >= C4 Alkanes
        'n_octane': 'ALK4',  # NOTE: lumped tracer for >= C4 Alkanes
        'n_pentane': 'ALK4',  # NOTE: lumped tracer for >= C4 Alkanes
        'o_xylene': 'XYLE',
        'pent_1_ene': 'PRPE',  # NOTE: lumped tracer for >= C3 alkenes
        'propane': 'C3H8',
        'propene': 'PRPE',  # NOTE: lumped tracer for >= C3 alkenes
        'toluene': 'TOLU',
        # 'toluenechb':,
        'trans_2_butene': 'PRPE',  # NOTE: lumped tracer for >= C3 alkenes
        'trans_2_pentene': 'PRPE',  # NOTE: lumped tracer for >= C3 alkenes
    }
    # Invert the dictionary?
    if invert:
        d = {v: k for k, v in list(d.items())}
    return d[var]
89ee80ff698aa17c04b589c3068b5b584adea95b
27,312
def is_valid_bdf_multiplier(multiplier):
    """Check if the detect multiplier is valid (between 3 and 50)"""
    if multiplier < 3 or multiplier > 50:
        return False
    return True
d6c9b96611f42d2004f00fd334d5ae8773aecb94
27,315
import re


def _can_skip_tests(file_names, triggers):
    """
    Determines if tests are skippable based on if all files do not match
    list of regexes

    :param file_names: list of changed files generated by _get_changed_files()
    :param triggers: list of regexes matching file name that indicates tests
        should be run
    :return: safe to skip tests
    """
    for file_name in file_names:
        if any(re.match(trigger, file_name) for trigger in triggers):
            return False
    return True
de3f9c66332b8b6cc1c788233a6db0462fb38470
27,322
def format_puzzle(grid):
    """Formats the puzzle for printing."""
    puzzle = ""
    for line in grid:
        puzzle += " ".join(line) + "\n"
    return puzzle
0b6017d0e9d8a2d29cfe83ef3c20c4cdf535d035
27,326
import random


def roll(add=0):
    """A quick function to roll a d20 and add the modifier"""
    return random.randint(1, 20) + add
e8afe08b270ac2488ded0e6c9fa9574d406d9005
27,334
def yn_prompt(question: str, yes=None, no=None) -> bool:
    """Ask yes-no question.

    Args:
        question: Description of the prompt
        yes: List of strings interpreted as yes
        no: List of strings interpreted as no

    Returns:
        True if yes, False if no.
    """
    if not yes:
        yes = ["yes", "ye", "y"]
    if not no:
        no = ["no", "n"]
    prompt = question
    if not prompt.endswith(" "):
        prompt += " "
    prompt += "[{} or {}] ".format("/".join(yes), "/".join(no))
    print(prompt, end="")
    while True:
        choice = input().lower().strip()
        if choice in yes:
            return True
        elif choice in no:
            return False
        else:
            print(
                "Please respond with '{}' or '{}': ".format(
                    "/".join(yes), "/".join(no)
                ),
                end="",
            )
3338493b42b118d9aacadff70dab3738643b538a
27,341
def hlstr(string, color="white"):
    """
    Return HTML highlighting text with given color.

    Args:
        string (string): The string to render
        color (string): HTML color for background of the string
    """
    return f"<mark style=background-color:{color}>{string} </mark>"
52e9d069d559feec2237d0847e667a1cb53326d8
27,343
def make_valid_did(lfn_dict):
    """
    When managing information about a LFN (such as in `rucio upload` or the
    RSE manager's upload), we add the `filename` attribute to record the name
    of the file on the local disk in addition to the remainder of the DID
    information.

    This function will take that python dictionary, and strip out the
    additional `filename` key. If this is not done, then the dictionary will
    not pass the DID JSON schema validation.
    """
    lfn_copy = dict(lfn_dict)
    lfn_copy['name'] = lfn_copy.get('name', lfn_copy['filename'])
    del lfn_copy['filename']
    return lfn_copy
10e5777e5727e835683752aac197b43808d48fb4
27,353
def get_response(move) -> int:
    """
    Utility function to enter the response to a move.

    :param move: The move to which to respond.
    :return: 1: hit, 2: hit and ship destroyed, 3: game over, you win, -1: miss
    """
    print("Move made:", move)
    return int(input('What response to return?'))
cb115447fe5cf244b7a0bee424edeedb45b5ea42
27,354
import gzip


def load_fasttext_class_probabilities(probability_file_path):
    """
    Utility function that loads class probabilities from a previously
    performed prediction run.

    :param probability_file_path: str, path to the output file with class
        probabilities for the test dataset
    :return: list of float: probability of belonging to the positive class
        for each example in the test dataset
    """
    probabilities = []
    with gzip.open(probability_file_path, 'rt') as fin:
        for line in fin:
            cols = line.rstrip().split()
            prob = None
            for i, col in enumerate(cols):
                if col == '__label__1':
                    prob = float(cols[i + 1])
            assert prob is not None
            probabilities.append(prob)
    return probabilities
3ddff33c878b6ec013b2bfb8ff1e978f602a7d91
27,355
def get_package_names(pyproject):
    """
    Get package names

    :param dict pyproject: pyproject.toml body.
    :return: Package names
    :rtype: list
    """
    package_names = []
    for pkg in pyproject["package"]:
        if pkg["category"] == "main":
            package_names.append(pkg["name"])
    return package_names
9d12deab1613c4780a7b53d6f13233b72b51cf23
27,356
import random


def sim_detections(gt, tpr, fpr):
    """Simulates detection data for a set of ground truth cluster labels and
    an annotator with a specified TPR and FPR. Returns an array with the same
    length as the input gt, where 1 indicates the simulated annotator
    detected a cluster and 0 indicates an undetected cluster.

    Args:
        gt (array): Array of ground truth cluster labels. 1 indicates a true
            detection and 0 indicates a false detection.
        tpr (float): The true positive rate of the annotator. For a ground
            truth value of 1, it is the probability that the function will
            output 1, indicating that the simulated annotator detected the
            true cluster.
        fpr (float): The false positive rate of the annotator. For a ground
            truth value of 0, it is the probability that the function will
            output 1, indicating that the simulated annotator falsely
            detected the cluster.

    Returns:
        array: Array of detected cluster labels. A value of 1 indicates that
        a cluster was detected by the annotator, and 0 indicates that the
        cluster was not detected by the annotator.
    """
    assert tpr >= 0 and tpr <= 1, "TPR must be between 0 and 1"
    assert fpr >= 0 and fpr <= 1, "FPR must be between 0 and 1"
    det_list = []
    for item in gt:
        rand = random.random()
        if item == 1:
            if rand < tpr:
                det_list.append(1)
            else:
                det_list.append(0)
        elif item == 0:
            if rand < fpr:
                det_list.append(1)
            else:
                det_list.append(0)
    return det_list
ad8d0ac4423333c64ab1db8b838cba4ed7da4291
27,357
def update_csv_data_dict(csv_data, first_column, *other_columns):
    """
    Update a csv dictionary for a given first column

    :param csv_data: The csv data dictionary to add the row to.
    :param first_column: The first column of the row to add.
    :param *other_columns: The further columns of the row to add.
    """
    line = first_column
    for column in other_columns:
        line += ',%s' % ('' if column is None else column)
    csv_data[first_column] = line
    return csv_data
34abbef2c026bc520a7f4048cd00b6710414294d
27,361
import unittest


def run_student_tests(print_feedback=True, show_traces=True,
                      success_required=True):
    """Run a suite of student submitted tests.

    Tests must be located in /autograder/source/student_tests/

    Args:
        print_feedback (bool): Print success or failure message
        show_traces (bool): Show failure/error stack traces
        success_required (bool): If True, this function will raise an
            AssertionError if any student tests fail.

    Returns:
        bool: True if all tests pass, False otherwise

    Raises:
        AssertionError if success_required is true and any test fails.
    """
    suite = unittest.defaultTestLoader.discover('student_tests',
                                                top_level_dir="./")
    result = unittest.TestResult()
    suite.run(result)
    succeeded = len(result.failures) == 0 and len(result.errors) == 0
    if not succeeded:
        if print_feedback:
            print("It looks like your submission is not passing your own tests:")
            if len(result.errors) > 0:
                print("Errors:")
                for error in result.errors:
                    print(error[0]._testMethodName)
                    if show_traces:
                        print(error[1])
            if len(result.failures) > 0:
                print("Failures:")
                for failure in result.failures:
                    print(failure[0]._testMethodName)
                    if show_traces:
                        print(failure[1])
        if success_required:
            raise AssertionError("Student tests failed.")
    else:
        if print_feedback:
            print("Submission passes student tests.")
    return succeeded
e262b6d5e8c74ca9085aa943a5d58670314d781d
27,362
import torch


def rotated_box_to_poly(rotated_boxes: torch.Tensor):
    """
    Transform rotated boxes to polygons

    Args:
        rotated_boxes (Tensor): (x, y, w, h, a) with shape (n, 5)
    Return:
        polys (Tensor): 4 corner points (x, y) of polygons with shape (n, 4, 2)
    """
    cs = torch.cos(rotated_boxes[:, 4])
    ss = torch.sin(rotated_boxes[:, 4])
    w = rotated_boxes[:, 2] - 1
    h = rotated_boxes[:, 3] - 1
    x_ctr = rotated_boxes[:, 0]
    y_ctr = rotated_boxes[:, 1]
    x1 = x_ctr + cs * (w / 2.0) - ss * (-h / 2.0)
    x2 = x_ctr + cs * (w / 2.0) - ss * (h / 2.0)
    x3 = x_ctr + cs * (-w / 2.0) - ss * (h / 2.0)
    x4 = x_ctr + cs * (-w / 2.0) - ss * (-h / 2.0)
    y1 = y_ctr + ss * (w / 2.0) + cs * (-h / 2.0)
    y2 = y_ctr + ss * (w / 2.0) + cs * (h / 2.0)
    y3 = y_ctr + ss * (-w / 2.0) + cs * (h / 2.0)
    y4 = y_ctr + ss * (-w / 2.0) + cs * (-h / 2.0)
    polys = torch.stack([x1, y1, x2, y2, x3, y3, x4, y4], dim=-1)
    polys = polys.reshape(-1, 4, 2)  # to (n, 4, 2)
    return polys
52f0aa5f225006162bbbd676d1319477802cb49e
27,363
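A quick corner check for rotated_box_to_poly above, using an axis-aligned box (angle 0):

box = torch.tensor([[10.0, 10.0, 5.0, 3.0, 0.0]])  # (x, y, w, h, a)
poly = rotated_box_to_poly(box)
print(poly.shape)  # torch.Size([1, 4, 2])
print(poly[0, 0])  # tensor([12., 9.]) - first corner, given the w-1/h-1 convention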
import torch


def maybe_cuda(what, use_cuda=True, **kw):
    """
    Moves `what` to CUDA and returns it, if `use_cuda` and it's available.

    Args:
        what (object): any object to possibly move to gpu
        use_cuda (bool): if we want to use gpu or cpu.
    Returns:
        object: the same object, possibly moved to gpu.
    """
    if use_cuda is not False and torch.cuda.is_available():
        what = what.cuda()
    return what
ad9f8aa37c4d32000690768d5c18f327ff3bc76c
27,364
import logging


def get_logger(
    log_level=logging.INFO,
    msg_format="%(asctime)s - %(levelname)s - %(filename)s:%(funcName)s:%(lineno)s"
               " - %(message)s",
    cls_name=__name__,
):
    """
    Instantiate a new logger.

    Args:
        log_level = One of the log reporting levels defined in the logging
            module (Default: logging.INFO)
        msg_format = Format string passed to logging.basicConfig
        cls_name = Class name for this logger (Default: __name__)
    """
    logging.basicConfig(format=msg_format, level=log_level)
    return logging.getLogger(cls_name)
154518079694a57f44ebe2c83a5e19ff8c4b2396
27,365
def load_RIRE_ground_truth(file_name):
    """
    Load the point sets defining the ground truth transformations for the
    RIRE training dataset.

    Args:
        file_name (str): RIRE ground truth file name. File format is specific
            to the RIRE training data, with the actual data expected to be in
            lines 15-23.

    Returns:
        Two lists of tuples representing the points in the "left" and "right"
        coordinate systems.
    """
    with open(file_name, "r") as fp:
        lines = fp.readlines()
    l = []
    r = []
    # Fiducial information is in lines 15-22, starting with the second entry.
    for line in lines[15:23]:
        coordinates = line.split()
        l.append(
            (float(coordinates[1]), float(coordinates[2]), float(coordinates[3]))
        )
        r.append(
            (float(coordinates[4]), float(coordinates[5]), float(coordinates[6]))
        )
    return (l, r)
9c7747b6fad1a10fb8cbb32162a3423e31fa40f3
27,368
def subdivide(x_1, x_2, n):
    """Performs the n-th Cantor subdivision of the interval (x_1, x_2),
    a subset of [0, 1]"""
    if n == 0:
        return []
    new_x_1 = 2 * (x_1 / 3) + x_2 / 3
    new_x_2 = x_1 / 3 + 2 * (x_2 / 3)
    return (
        subdivide(x_1, new_x_1, n - 1)
        + [new_x_1, new_x_2]
        + subdivide(new_x_2, x_2, n - 1)
    )
ee2cc0ba214d363555224e4b70c10976c63d7dec
27,369
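The first two Cantor subdivision levels of (0, 1), from the function above:

print(subdivide(0, 1, 1))  # [1/3, 2/3] - endpoints of the removed middle third
print(subdivide(0, 1, 2))  # [1/9, 2/9, 1/3, 2/3, 7/9, 8/9]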
def _get_ref(info: list) -> str:
    """Get the workspace reference from an info tuple"""
    return f"{info[6]}/{info[0]}/{info[4]}"
fbd7bb479abc090b643fa1f1ecfcdd84dee18f62
27,373
import re
import inspect


def parse_pyvars(code: str, frame_nr: int = 2):
    """Looks through call stack and finds values of variables.

    Parameters
    ----------
    code : str
        SuperCollider command to be parsed
    frame_nr : int, optional
        on which frame to start, by default 2 (grandparent frame)

    Returns
    -------
    dict
        {variable_name: variable_value}

    Raises
    ------
    NameError
        If the variable value could not be found.
    """
    matches = re.findall(r"\s*\^[A-Za-z_]\w*\s*", code)
    pyvars = {match.split("^")[1].strip(): None for match in matches}
    missing_vars = list(pyvars.keys())
    stack = inspect.stack()
    frame = None
    try:
        while missing_vars and frame_nr < len(stack):
            frame = stack[frame_nr][0]
            for pyvar in pyvars:
                if pyvar not in missing_vars:
                    continue
                # check for variable in local variables
                if pyvar in frame.f_locals:
                    pyvars[pyvar] = frame.f_locals[pyvar]
                    missing_vars.remove(pyvar)
                # check for variable in global variables
                elif pyvar in frame.f_globals:
                    pyvars[pyvar] = frame.f_globals[pyvar]
                    missing_vars.remove(pyvar)
            frame_nr += 1
    finally:
        del frame
        del stack
    if missing_vars:
        raise NameError("name(s) {} not defined".format(missing_vars))
    return pyvars
381d698d3905ee306f75ad888d07c1107e398251
27,374
def _get_lines(file_obj):
    """Return all the lines in file_obj."""
    return [line.strip() for line in file_obj.readlines()]
07b2bcab4ad9f4f48e5c633752c088791eee4c2d
27,380
def discovery_topic_is_device_config(topic):
    """Return True if the discovery topic is device configuration."""
    return topic.endswith("config")
ffb313d9a35312bf8dd6f9c5ad97a9b1dc9ab2fd
27,387
def format_class_name(name):
    """
    Formats a string to CapWords.

    :param name: string to format
    :type name: str
    :return: string with the name in CapWords
    :rtype: str
    """
    fixed_name = name.title().replace("_", "")
    return fixed_name
064369048205aaf5afe872844ceff4605b6f1498
27,397
import time


def wait_on_job(job):
    """Block until the async job is done."""
    while job.status() != 'done':
        time.sleep(.05)  # pragma: no cover
    return job
e08190d6a8dee960e7e3f3490ed934203f0aa7ff
27,405