def is_response_paginated(response_data):
    """Checks if the response data dict has expected paginated results keys

    Returns True if it finds all the paginated keys, False otherwise
    """
    try:
        keys = list(response_data.keys())
    except AttributeError:
        # If we can't get keys, we're certainly not paginated
        return False
    return set(keys) == set([u'count', u'next', u'previous', u'results'])
521c28c1d6e29e5785b3bcbd5d2604210b3a3874
702,076
def compute_power(rawvolts, rawamps):
    """
    Compute the power. Looks trivial, but I'm gonna implement smoothing later.
    """
    power = rawvolts * 1.58
    power_low = rawvolts * 1.51
    power_high = rawvolts * 1.648
    return power, power_low, power_high
9202e456d4655de12ec5608011e80647cf62f7ab
702,078
import csv


def read_aa_losses(filename):
    """
    Read AA losses from data file. (assume fixed structure...)
    """
    aa_losses = {}
    with open(filename, 'r') as f:
        reader = csv.reader(f, delimiter=',')
        next(reader)  # skip headers
        for line in reader:
            if len(line) == 0:
                continue
            aa_id = line[1]
            aa_mono = float(line[4])
            aa_avg = float(line[5])
            aa_losses[aa_id.lower()] = (aa_mono, aa_avg)
    return aa_losses
b1ba8349d01d43112ef67436fa2bb09d3bed768c
702,080
def add(M1, M2):
    """
    Returns a matrix Q, where Q[i][j] = M1[i][j] + M2[i][j].
    M2 is replaced by Q.
    """
    m = len(M1)
    n = len(M1[0])
    for p in range(m):
        for q in range(n):
            M2[p][q] = M2[p][q] + M1[p][q]
    return M2
ac86e9109f6287cde062392992bf64dbf49614f5
702,081
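A quick check of add() with hypothetical values, showing that the second argument is mutated in place:

M1 = [[1, 2], [3, 4]]
M2 = [[10, 20], [30, 40]]
print(add(M1, M2))  # [[11, 22], [33, 44]]
print(M2)           # M2 now holds the sums as well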
def get_config_by_url(cfg, url):
    """
    :param cfg: dictionary
    :param url: path to value separated by dots, e.g. key1.key2.key3
    :return: value from dictionary
    """
    keys = url.split('.')
    for key in keys:
        cfg = cfg[key]
    return cfg
0dd2a01a2ecd198f044eebcd3f854f96dbf945bd
702,083
def imq_kernel(x, y, score_x, score_y, g=1, beta=0.5, return_kernel=False):
    """Compute the IMQ Stein kernel between x and y

    Parameters
    ----------
    x : torch.tensor, shape (n, p)
        Input particles
    y : torch.tensor, shape (n, p)
        Input particles
    score_x : torch.tensor, shape (n, p)
        The score of x
    score_y : torch.tensor, shape (n, p)
        The score of y
    g : float
        Bandwidth
    beta : float
        Power of the kernel
    return_kernel : bool
        whether the original kernel k(xi, yj) should also be returned

    Return
    ------
    stein_kernel : torch.tensor, shape (n, n)
        The linear Stein kernel
    kernel : torch.tensor, shape (n, n)
        The base kernel, only returned if return_kernel is True
    """
    _, p = x.shape
    d = x[:, None, :] - y[None, :, :]
    dists = (d ** 2).sum(axis=-1)
    res = 1 + g * dists
    kxy = res ** (-beta)
    scores_d = score_x[:, None, :] - score_y[None, :, :]
    temp = d * scores_d
    dkxy = 2 * beta * g * (res) ** (-beta - 1) * temp.sum(axis=-1)
    d2kxy = 2 * (
        beta * g * (res) ** (-beta - 1) * p
        - 2 * beta * (beta + 1) * g ** 2 * dists * res ** (-beta - 2)
    )
    k_pi = score_x.mm(score_y.T) * kxy + dkxy + d2kxy
    if return_kernel:
        return k_pi, kxy
    return k_pi
08c95b04789f47b557645df975a20c1f1b478a0d
702,091
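A minimal sketch exercising imq_kernel on random particles, assuming PyTorch is installed; for a standard normal target the score function is simply -x:

import torch

torch.manual_seed(0)
x = torch.randn(5, 3)
y = torch.randn(5, 3)
# Score of a standard normal density: grad log p(x) = -x.
k_pi, kxy = imq_kernel(x, y, score_x=-x, score_y=-y, g=1.0, beta=0.5, return_kernel=True)
print(k_pi.shape, kxy.shape)  # torch.Size([5, 5]) torch.Size([5, 5])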
def build_rgb_and_opacity(s):
    """
    Given a KML color string, return an equivalent RGB hex color string and
    an opacity float rounded to 2 decimal places.

    EXAMPLE::

        >>> build_rgb_and_opacity('ee001122')
        ('#221100', 0.93)
    """
    # Set defaults
    color = '000000'
    opacity = 1
    if s.startswith('#'):
        s = s[1:]
    if len(s) == 8:
        color = s[6:8] + s[4:6] + s[2:4]
        opacity = round(int(s[0:2], 16)/256, 2)
    elif len(s) == 6:
        color = s[4:6] + s[2:4] + s[0:2]
    elif len(s) == 3:
        color = s[::-1]
    return '#' + color, opacity
06cb729338584c9b3b934a844f5a2ec53245e967
702,092
def url_add_api_key(url_dict: dict, api_key: str) -> str:
    """Attaches the api key to a given url

    Args:
        url_dict: Dict with the request url and its relevant metadata.
        api_key: User's API key provided by US Census.

    Returns:
        URL with attached API key information.
    """
    return url_dict['url'] + f'&key={api_key}'
1442d0f67a1f3603205870d1af0baf30eb3f1d50
702,095
def _compute_fans(shape):
    """Computes the fan-in and fan-out for a depthwise convolution's kernel."""
    if len(shape) != 4:
        raise ValueError(
            'DepthwiseVarianceScaling() is only supported for the rank-4 kernels '
            'of 2D depthwise convolutions. Bad kernel shape: {}'
            .format(str(shape)))

    receptive_field_size = shape[0] * shape[1]
    depth_multiplier = shape[3]
    fan_in = receptive_field_size
    fan_out = receptive_field_size * depth_multiplier

    return (fan_in, fan_out)
a33bfdf32080147f092d32fca1d70a90b2b25e91
702,097
def merge_shards(shard_data, existing):
    """
    Compares ``shard_data`` with ``existing`` and updates ``shard_data`` with
    any items of ``existing`` that take precedence over the corresponding item
    in ``shard_data``.

    :param shard_data: a dict representation of shard range that may be
        modified by this method.
    :param existing: a dict representation of shard range.
    :returns: True if ``shard_data`` has any item(s) that are considered to
        take precedence over the corresponding item in ``existing``
    """
    if not existing:
        return True
    if existing['timestamp'] < shard_data['timestamp']:
        # note that currently we do not roll forward any meta or state from
        # an item that was created at older time, newer created time trumps
        return True
    elif existing['timestamp'] > shard_data['timestamp']:
        return False

    new_content = False
    # timestamp must be the same, so preserve existing range bounds and deleted
    for k in ('lower', 'upper', 'deleted'):
        shard_data[k] = existing[k]

    # now we need to look for meta data updates
    if existing['meta_timestamp'] >= shard_data['meta_timestamp']:
        for k in ('object_count', 'bytes_used', 'meta_timestamp'):
            shard_data[k] = existing[k]
    else:
        new_content = True

    if (existing['state_timestamp'] == shard_data['state_timestamp']
            and shard_data['state'] > existing['state']):
        new_content = True
    elif existing['state_timestamp'] >= shard_data['state_timestamp']:
        for k in ('state', 'state_timestamp', 'epoch'):
            shard_data[k] = existing[k]
    else:
        new_content = True
    return new_content
18704dd79274dd7ec6157cd28be04a5858e6cff7
702,098
def usage_percent(used, total, round_=None):
    """Calculate percentage usage of 'used' against 'total'."""
    try:
        ret = (float(used) / total) * 100
    except ZeroDivisionError:
        return 0.0
    else:
        if round_ is not None:
            ret = round(ret, round_)
        return ret
dd707700de52020102ad51ad6f8494d0db489463
702,101
import re


def is_valid_regex(string):
    """
    Checks whether the re module can compile the given regular expression.

    :param string: str
    :return: boolean
    """
    try:
        re.compile(string)
        is_valid = True
    except re.error:
        is_valid = False
    return is_valid
3893410afd8d3e6ed9310550159b35cc504dfffa
702,102
import calendar


def year_add(date, years):
    """Add number of years to date.

    >>> import datetime
    >>> year_add(datetime.datetime(2016, 2, 29), 1)
    datetime.datetime(2017, 2, 28, 0, 0)
    >>> year_add(datetime.date(2016, 2, 29), 1)
    datetime.date(2017, 2, 28)
    >>> year_add(datetime.date(2015, 2, 28), 1)
    datetime.date(2016, 2, 28)
    >>> year_add(datetime.date(2017, 2, 28), -1)
    datetime.date(2016, 2, 28)
    >>> year_add(datetime.datetime(2016, 2, 29), -1)
    datetime.datetime(2015, 2, 28, 0, 0)
    """
    if date.day == 29 and date.month == 2 and not calendar.isleap(date.year + years):
        return date.replace(day=28, year=date.year + years)
    return date.replace(year=date.year + years)
62be01b7051ddef27376ebae4b97f63e9b7ca979
702,106
def _get_command_powershell_script(command):
    """Return a valid CMD command that runs a powershell script."""
    return "powershell -NonInteractive -NoLogo -File {}".format(command)
fdd67ac942e7869417c57f8021f26480228bc0a7
702,109
from pathlib import Path
import json


def latest_checkpoint(model_dir, model_name):
    """return path of latest checkpoint in a model_dir

    Args:
        model_dir: string, your model dir (where ckpts, summaries, logs, etc.
            are saved).
        model_name: name of your model. we find ckpts by name
    Returns:
        path: None if it doesn't exist, or the latest checkpoint path.
    """
    ckpt_info_path = Path(model_dir) / "checkpoints.json"
    if not ckpt_info_path.is_file():
        return None
    with open(ckpt_info_path, "r") as f:
        ckpt_dict = json.loads(f.read())
    if model_name not in ckpt_dict["latest_ckpt"]:
        return None
    latest_ckpt = ckpt_dict["latest_ckpt"][model_name]
    ckpt_file_name = Path(model_dir) / latest_ckpt
    if not ckpt_file_name.is_file():
        return None
    return str(ckpt_file_name)
29acafdb72bbb549cda7d72cc15a5a93f5535dca
702,113
def get_token_list(text):
    """Returns a list of tokens.

    This function expects that the tokens in the text are separated by space
    character(s). Example: "ca n't , touch". This is the case at least for the
    public DiscoFuse and WikiSplit datasets.

    Args:
        text: String to be split into tokens.
    """
    return text.split()
01a917fae5923cdfd693548bb688695a917fab70
702,115
def cut_string(string, limit=30):
    """Shorten the length of longer strings."""
    if len(string) <= limit:
        return string
    else:
        return string[:limit-3] + '...'
842cfefcff84c4f146cc85a4e86dff1486e9a434
702,116
def heading(heading_string, underline='='):
    """
    Takes a raw string and underlines it with the given underline char
    """
    return '%s\n%s' % (heading_string, underline * len(heading_string))
369385ffef60b88ba7e3a5c376236f6d4043ac72
702,117
def profile_to_node(src_profile):
    """convert source profile to graph node."""
    return (src_profile['uid'], src_profile)
970d349c2884dd57d10bef8f7e2649509e480a62
702,122
def matchnocase(word, vocab):
    """
    Match a word to a vocabulary while ignoring case

    :param word: Word to try to match
    :param vocab: Valid vocabulary
    :return:

    >>> matchnocase('mary', {'Alice', 'Bob', 'Mary'})
    'Mary'
    """
    lword = word.lower()
    listvocab = list(vocab)  # this trick catches dict and set in addition to list
    lvocab = [w.lower() for w in listvocab]
    if lword in lvocab:
        return listvocab[lvocab.index(lword)]
    return None
ba0354d7669d08fbdedc926c11f446c26f401e89
702,130
from datetime import datetime
from itertools import islice


def get_first_timestamp(log_file, search_text):
    """Get the first timestamp of `search_text` in the log_file

    Args:
        log_file
        search_text (str)

    Returns:
        timestamp: datetime object
    """
    timestamp = None
    with open(log_file, "r") as f:
        # islice stops cleanly if the file has fewer than 51 lines, where
        # calling next() directly would raise an exception.
        content = [x.strip().split() for x in islice(f, 51)]
    for log_line in content:
        # Extract text
        text = ' '.join(log_line[3:-1])
        if search_text in text.lower():
            # Extract date for this text
            date_time_str = ' '.join(log_line[0:2])[1:-1]
            timestamp = datetime.strptime(date_time_str, '%Y-%m-%d %H:%M:%S.%f')
            break
    return timestamp
fbf5f00ea0810788019ec081a67664393763a95c
702,131
def get_fetaure_names(df, feature_name_substring):
    """
    Returns the list of features with name matching 'feature_name_substring'
    """
    return [col_name for col_name in df.columns
            if col_name.find(feature_name_substring) != -1]
14103620e89b282da026fd9f30c7491b63820c09
702,132
import numbers


def ISNUMBER(value):
    """
    Checks whether a value is a number.

    >>> ISNUMBER(17)
    True
    >>> ISNUMBER(-123.123423)
    True
    >>> ISNUMBER(False)
    True
    >>> ISNUMBER(float('nan'))
    True
    >>> ISNUMBER(float('inf'))
    True
    >>> ISNUMBER('17')
    False
    >>> ISNUMBER(None)
    False
    >>> ISNUMBER(datetime.date(2011, 1, 1))
    False

    More tests:
    >>> ISNUMBER(AltText("text"))
    False
    >>> ISNUMBER('')
    False
    """
    return isinstance(value, numbers.Number)
422c5bcd24a21a50bfefb1a00193387e725d435b
702,133
def eval_multiple(exprs, **kwargs):
    """Given a list of expressions, and keyword arguments that set variable
    values, returns a list of the evaluations of the expressions.

    This can leverage common subexpressions in exprs to speed up running times
    compared to multiple eval() calls.
    """
    for e in exprs:
        e._clear_eval_cache()
    return [e._eval([], kwargs) for e in exprs]
2bc90dacb972d3315168638a4ea99f9cfbb13830
702,136
import requests


def get(url, params, proxies, headers):
    """Send a request with the GET method."""
    response = requests.get(url, params=params, proxies=proxies, headers=headers)
    return response
a481a91e5f3fc71f88de8d84efaac3dd666c302e
702,137
def action_from_trinary_to_env(action) -> int:
    """ Maps trinary model output to int action understandable by env """
    assert action in (0, 1, 2), f'Wrong action: {action}'
    return {
        0: 0,
        1: 2,
        2: 5
    }[action]
e2a7cd3d6c018a7112e2304f2910781b665a8247
702,139
import hashlib


def file_md5(file_path: str) -> str:
    """Compute the md5 hex digest of a binary file with reasonable memory usage."""
    md5_hash = hashlib.md5()
    with open(file_path, 'rb') as f:
        while True:
            data = f.read(65536)
            if not data:
                break
            md5_hash.update(data)
    return md5_hash.hexdigest()
ac0576e901ca3205f824a599566e628ee29f5a7c
702,141
def pl_to_eng(unit: str) -> str:
    """Converts Polish terminology to English"""
    switcher = {
        "pikinier": "spear",
        "miecznik": "sword",
        "topornik": "axe",
        "lucznik": "archer",
        "zwiadowca": "spy",
        "lekki kawalerzysta": "light",
        "lucznik na koniu": "marcher",
        "ciezki kawalerzysta": "heavy",
        "taran": "ram",
        "katapulta": "catapult",
        "szlachcic": "snob"
    }
    return switcher.get(unit, "error")
7d9634c554cc84663b1c7fb787b070d535747d36
702,142
from typing import Dict
from typing import List
from pathlib import Path

import yaml


def write_commands_instructions(
    commands_instructions: Dict[str, List[str]], scene_path: Path, index: int
) -> Path:
    """*Deprecated, use `write_yaml_instructions` instead.*

    Writes a command instruction `yaml` file. These are the files that
    [`runner`](github.com/TrickyTroll/good-bot-runner) takes as input to type
    commands and expect stuff.

    Args:
        commands_instructions (Dict[str, List[str]]): A dictionary of commands
            and things to expect. Keys should be either `commands` or
            `expect`. The values should be a list of commands and a list of
            things to expect.
        scene_path (Path): The path towards the scene where the
            `commands_instructions` come from.
        index (int): The index of the command block.

    Returns:
        Path: The path towards where the new `yaml` file has been written.
    """
    commands_path: Path = scene_path / Path("commands")
    file_path: Path = commands_path / Path(f"commands_{index + 1}").with_suffix(".yaml")
    to_write: str = yaml.safe_dump(commands_instructions)
    with open(file_path, "w") as stream:
        stream.write(to_write)
    return file_path
1a0cc75b9bcf269133b799f1abe4b507b2f42c16
702,147
import re


def replace_simultaneous(s: str, r0: str, r1: str) -> str:
    """
    Replace all instances of `r0` in `s` with `r1` and vice versa.

    This method does the replacements simultaneously so that, e.g., if you
    call `replace_simultaneous("apple banana", "apple", "banana")` the result
    will be `"banana apple"` instead of either `"apple apple"` or
    `"banana banana"` for a sequential replacement (depending on the order of
    the replacements).

    It is assumed that `r0` is not a substring of `r1` and vice versa.

    :param s: string to do replacements in
    :param r0: first string to replace
    :param r1: second string to replace
    """
    replacement = {r0: r1, r1: r0}
    r0 = re.escape(r0)
    r1 = re.escape(r1)
    return re.sub(f"{r0}|{r1}", lambda x: replacement[x.group()], s)
bcfe2d13fb3b0c3156e866954d6ddf1a0a02e9fc
702,148
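A short demonstration of the simultaneous swap described in the docstring (hypothetical inputs; the second call shows that regex metacharacters in the targets are escaped):

print(replace_simultaneous("apple banana", "apple", "banana"))  # banana apple
print(replace_simultaneous("a+b", "a+", "b"))                   # ba+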
def retry_http(response):
    """Retry on specific HTTP errors:

        * 429: Rate limited to 50 reqs/minute.

    Args:
        response (dict): Dynatrace API response.

    Returns:
        bool: True to retry, False otherwise.
    """
    retry_codes = [429]
    code = int(response.get('error', {}).get('code', 200))
    return code in retry_codes
f45f6e9239d78cfa6ad0ec397e3a5b4a58a655f5
702,150
def circular_wetted_perimeter(angle, diameter):
    """Returns circle wetted perimeter.

    :param angle: angle in radians from angle_in_partial_filled_pipe function
    :param diameter: diameter of pipe [m]
    """
    return angle * diameter
e24cc0839eb3bf78e65f6b9b99b34bf87f77b2cf
702,152
def is_dunder(attr_name: str) -> bool:
    """
    Returns whether the given attr is a magic/dunder method.

    :param attr_name:
    """
    return attr_name.startswith("__") and attr_name.endswith("__")
b3a1f10e9fc7fd5c7dbb930be977a814e6b0c37d
702,156
def regularize_layer_weighted(layers, penalty, tags={'regularizable': True}, **kwargs):
    """
    Computes a regularization cost by applying a penalty to a group of layers,
    weighted by a coefficient for each layer.

    Parameters
    ----------
    layers : dict
        A mapping from tuples of :class:`Layer` instances to coefficients.
    penalty : callable
    tags : dict
        Tag specifications which filter the parameters of the layer or layers.
        By default, only parameters with the `regularizable` tag are included.
        Should be defined inside the penalty function.
    **kwargs
        keyword arguments passed to penalty.

    Returns
    -------
    Theano scalar
        a scalar expression for the cost
    """
    return sum(coeff * sum(penalty(layer_tuple, tags, **kwargs))
               for layer_tuple, coeff in list(layers.items()))
e4de182b3e99ace5fd41c05d2a318fc865a6dd9c
702,158
def convert_df_2_string(df):
    """
    Convert data frame rows to string output where each new line is defined as \n
    """
    # initialise string
    output = 'agent,wkt\n'

    for i, row in df.iterrows():
        if i == len(df) - 1:
            output += str(row['label']) + ',' + str(row['geometry'])
        else:
            output += str(row['label']) + ',' + str(row['geometry']) + '\n'

    # set environment variable ${AGENTS}
    # os.environ['AGENTS'] = output

    return output
c8d2717b72f875f0f4ae743a2cc6c82550221447
702,160
from typing import List
from typing import Any
import random


def shuffle_list(elements: List[Any]) -> List[Any]:
    """
    Shuffle the input list in random order

    :param elements: List to be reshuffled
    :return: reshuffled list
    """
    random.shuffle(elements)
    return elements
17a6520fce91e60f1cfe59d31736c2e5f50ded6f
702,165
def merge_sort(array):
    """
    ### Merge sort
    Implementation of one of the most powerful sorting algorithms.
    Returns a sorted array.
    """
    def merge(L, R):
        res = []
        left_ind = right_ind = 0

        while left_ind < len(L) and right_ind < len(R):
            if L[left_ind] < R[right_ind]:
                res.append(L[left_ind])
                left_ind += 1
            elif L[left_ind] > R[right_ind]:
                res.append(R[right_ind])
                right_ind += 1
            else:
                # Equal elements: take one from each side.
                res.append(R[right_ind])
                res.append(L[left_ind])
                left_ind += 1
                right_ind += 1

        res.extend(L[left_ind:])
        res.extend(R[right_ind:])
        return res

    length = len(array)
    if length <= 1:
        return array
    middle_ind = length // 2
    right = merge_sort(array[middle_ind:])
    left = merge_sort(array[:middle_ind])
    return merge(right, left)
9f2101b0286525490aedce6d6f99962f5e6050f2
702,167
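A small sanity check for merge_sort, including duplicate keys (hypothetical input):

print(merge_sort([5, 2, 9, 2, 7, 1]))  # [1, 2, 2, 5, 7, 9]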
def split_col_row(ref):
    """Split the letter and number components of a cell reference.

    Examples:
    >>> split_col_row('A1')
    ('A', 1)
    >>> split_col_row('B100')
    ('B', 100)
    >>> split_col_row('CC12')
    ('CC', 12)
    """
    head = ref.rstrip("0123456789")
    tail = ref[len(head):]
    return head, int(tail)
ae236aa0521564958bd61643fbce932f1b8a2d99
702,169
import gzip


def get_file_handle(file_path, compression):
    """
    Returns a file handle to the given path.

    :param file_path: path to the file to open
    :param compression: indicates whether or not the input file is compressed
    :return: a file handle to file_path
    """
    if compression:
        return gzip.open(file_path, 'rt', encoding='utf-8', errors='strict')
    else:
        return open(file_path, 'rt', encoding='utf-8', errors='strict')
44c97b211c4b44679934eede62845c58947c4091
702,175
def save_params(net, best_metric, current_metric, epoch, save_interval, prefix):
    """Logic for if/when to save/checkpoint model parameters"""
    if current_metric < best_metric:
        best_metric = current_metric
        net.save_parameters('{:s}_best.params'.format(prefix))
        with open(prefix + '_best.log', 'a') as f:
            f.write('\n{:04d}:\t{:.4f}'.format(epoch, current_metric))
    if save_interval and (epoch + 1) % save_interval == 0:
        net.save_parameters('{:s}_{:04d}_{:.4f}.params'.format(prefix, epoch, current_metric))
    return best_metric
9af251965a4facc598c9a3d833f967511cc7b0ec
702,179
import operator


def vector_add(a, b):
    """Component-wise addition of two vectors.

    >>> vector_add((0, 1), (8, 9))
    (8, 10)
    """
    return tuple(map(operator.add, a, b))
2144a02128ffa8712cfb998045ede1ca9308650f
702,185
import json


def get_aws_key_and_secret(aws_creds_file_path):
    """
    Given a filename containing AWS credentials (see README.md),
    return a 2-tuple (access key, secret key).
    """
    with open(aws_creds_file_path, 'r') as f:
        creds_dict = json.load(f)
    return creds_dict['accessKeyId'], creds_dict['secretAccessKey']
b3eae6ee0283a7245d37f92b5a5f4ef1102e248d
702,186
def freq_id_to_stream_id(f_id):
    """ Convert a frequency ID to a stream ID. """
    pre_encode = (0, (f_id % 16), (f_id // 16), (f_id // 256))
    stream_id = (
        (pre_encode[0] & 0xF)
        + ((pre_encode[1] & 0xF) << 4)
        + ((pre_encode[2] & 0xF) << 8)
        + ((pre_encode[3] & 0xF) << 12)
    )
    return stream_id
f89d52adf4390f665e069c2b5f4f5accc22709b8
702,189
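A worked example of the nibble packing above (hypothetical ID): for f_id = 300 the masked nibbles are 12, 18 & 0xF = 2, and 1, so for IDs below 4096 the result is simply the ID shifted up by one nibble:

print(hex(freq_id_to_stream_id(300)))  # 0x12c0, i.e. 300 << 4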
import dateutil.parser


def get_entry_end_date(e):
    """Returns end date for entry"""
    return dateutil.parser.parse(e['time_end'])
a9b8bdae873de0ef97de49e342cd4f3bbd8117f6
702,191
from pathlib import Path


def get_launch_agents_dir() -> Path:
    """Returns user LaunchAgents directory."""
    launch_agents_dir = Path.home() / "Library" / "LaunchAgents"
    assert launch_agents_dir.is_dir()
    return launch_agents_dir
d11bc00c986bd5440549e71fdae9ed1f98f18d21
702,192
def yddot_d_z(mu, state, r_15_inv, r_25_inv):
    """
    Partial of y acceleration with respect to z

    Args:
        mu (float): three body constant
        state (np.array): 6 dimensional state vector of (x, y, z, dx, dy, dz)
        r_15_inv (float): 1 / norm(r_1)^(5) where r_1 is the vector from the
            primary (i.e. Earth) to the satellite
        r_25_inv (float): 1 / norm(r_2)^(5) where r_2 is the vector from the
            secondary (i.e. Moon) to the satellite

    Returns:
        float
    """
    x, y, z = state[:3]
    ans = 3 * mu * y * z * r_25_inv \
        + 3 * (1 - mu) * y * z * r_15_inv
    return ans
18281dc5dffdef99e38c33e28cc26a0e9d9aa262
702,193
def decypher(text):
    """ Decypher file name into descriptive label for legend """
    # name shortcuts
    help_dict = {"h": "Target/Hunt AI", "p": "Probabilistic AI", "r": "Random AI"}
    final = ""
    t_split = text.split("-")
    final += help_dict[t_split[0]]
    # hunt/target AI branch
    if t_split[0] == "h":
        if t_split[1] == "True":
            final += ", parity"
        if t_split[2] == "True":
            final += ", cheating"
            final += "(knows " + t_split[3] + " ships)"
    # probabilistic AI branch
    if t_split[0] == "p":
        if t_split[1] == "True":
            final += ", cheating"
            final += "(knows " + t_split[2] + " ships)"
    return final
bf4db039bcc86d8d874a29dbf301cc91fa461560
702,194
import re


def compute_rq_type(oslevel, empty_list):
    """Compute rq_type.

    return:
        Latest when oslevel is blank or latest (not case sensitive)
        Latest when oslevel is a TL (6 digits) and target list is empty
        TL     when oslevel is xxxx-xx(-00-0000)
        SP     when oslevel is xxxx-xx-xx(-xxxx)
        ERROR  when oslevel is not recognized
    """
    if oslevel is None or not oslevel.strip() or oslevel.upper() == 'LATEST':
        return 'Latest'
    if re.match(r"^([0-9]{4}-[0-9]{2})$", oslevel) and empty_list:
        return 'Latest'
    if re.match(r"^([0-9]{4}-[0-9]{2})(|-00|-00-0000)$", oslevel):
        return 'TL'
    if re.match(r"^([0-9]{4}-[0-9]{2}-[0-9]{2})(|-[0-9]{4})$", oslevel):
        return 'SP'
    return 'ERROR'
753e54d4858a8d1248958c15bbd6b1a0cbc9b02e
702,196
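Classifying a few oslevel strings (hypothetical inputs; the second argument only matters for the bare-TL case):

print(compute_rq_type('latest', False))      # Latest
print(compute_rq_type('7200-03', True))      # Latest (bare TL with an empty target list)
print(compute_rq_type('7200-03', False))     # TL
print(compute_rq_type('7200-03-02', False))  # SP
print(compute_rq_type('banana', False))      # ERROR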
import re


def convert_input_paths(argo_json):
    """
    argo aggregation is not valid json as properties are not enclosed in quotes:
    flow/step/[{task-id:flow-step-3119439657},{task-id:flow-step-195521861},{task-id:flow-step-3020891073}]

    Parameters
    ----------
    argo_json

    Returns
    -------
    list of task-ids to be consumed by metaflow join step:
    flow/step/:flow-step-3119439657,flow-step-195521861,flow-step-3020891073
    """
    flow, run_id, task_ids = argo_json.split('/')
    task_ids = re.sub(r'[\[\]{}]', '', task_ids)
    task_ids = task_ids.split(',')
    tasks = [t.split(":")[1] for t in task_ids]
    return '{}/{}/:{}'.format(flow, run_id, ','.join(tasks))
2a1e5ddd378546343d5532f304145cb2157244b5
702,197
from pathlib import Path


def get_solar_charge_state() -> str:
    """
    Gets the current state of the charging system

    Returns:
        The charge state object as a json string
    """
    current_state = Path('current_state.json').read_text()
    return current_state
0621aff9e6ae77b48811b2879f644ff3e4e4ee91
702,200
def get_fixture_value(request, fixture_name):
    """
    Returns the value associated with fixture named `fixture_name`, in
    provided `request` context. This is just an easy way to use
    `getfixturevalue` or `getfuncargvalue` according to whichever is available
    in the current `pytest` version.

    :param request:
    :param fixture_name:
    :return:
    """
    try:
        # Pytest 4+ or latest 3.x (to avoid the deprecated warning)
        return request.getfixturevalue(fixture_name)
    except AttributeError:
        # Pytest 3-
        return request.getfuncargvalue(fixture_name)
11e2b5f67595ecf102f7a8f28cc4aa151a8ebca5
702,201
def printSchoolYear(year1):
    """Return a print version of the given school year."""
    if year1:
        return "%d–%d" % (year1 - 1, year1)
93879512567a2be3e3cf541b747178b422be0590
702,206
def dahua_brightness_to_hass_brightness(bri_str: str) -> int:
    """
    Converts a dahua brightness (which is 0 to 100 inclusive) to what HASS
    expects, which is 0 to 255 inclusive
    """
    bri = 100
    # Only parse a non-empty string; the original inverted this check, which
    # would have crashed on empty input and ignored real values.
    if bri_str:
        bri = int(bri_str)

    current = bri / 100
    return int(current * 255)
d1d8d02f896edc4a16fbb1b26c99416b60764fc6
702,208
def fit_parabola(x1, x2, x3, y1, y2, y3):
    """Returns the parabola coefficients a, b, c given 3 data points
    [y(x) = a*x**2 + b*x + c]"""
    denom = (x1-x2) * (x1-x3) * (x2-x3)
    a = (x3 * (y2-y1) + x2 * (y1-y3) + x1 * (y3-y2)) / denom
    b = (x1**2 * (y2-y3) + x3**2 * (y1-y2) + x2**2 * (y3-y1)) / denom
    c = (x2**2 * (x3*y1 - x1*y3) + x2 * (x1**2*y3 - x3**2*y1) + x1*x3*(x3-x1)*y2) / denom
    return a, b, c
e077f5a895e353d5b980b15bee603be5c34d3ec4
702,218
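A sanity check recovering known coefficients: sampling y = 2x**2 - 3x + 1 at x = 0, 1, 2 gives y = 1, 0, 3 (hypothetical points):

print(fit_parabola(0, 1, 2, 1, 0, 3))  # (2.0, -3.0, 1.0)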
def generate_list(usb_dict: dict) -> list:
    """
    wrapper for list conversion

    :param usb_dict: usb dictionary
    :return: list of usb devices for tui print
    """
    devices = []
    for usb in usb_dict.values():
        devices.append(usb)
    return devices
8d1d106c7b9fd4078b0d78f0370bd9d22ecda368
702,219
def reset_line_breaks(curr_boundary={}):
    """
    Builds a fresh line breaks dictionary while keeping any
    information provided concerning line boundaries.

    Parameters
    ----------
    curr_boundary: dict
        Line boundaries to be preserved

    Returns
    -------
    dict
        The newly initialized line breaks dictionary
    """
    start = []
    end = []
    tokens = []
    if "end" in curr_boundary:
        end = curr_boundary["end"]
    if "start" in curr_boundary:
        start = curr_boundary["start"]
    if "tokens" in curr_boundary:
        tokens = curr_boundary["tokens"]
    line_breaks = {
        "end": end,
        "pageBoundaries": {},
        "start": start,
        "tokens": tokens
    }
    return line_breaks
da7f1fc0f206e8a39f0a0d42f1652a3c4bb23200
702,221
def stop(M, i):
    """
    Check if the algorithm converged.

    :param M: input matrix
    :param i: iteration step
    :return: boolean: True if converged
    """
    # this saves time, so we dont have to do multiplication in the first 7 iterations
    if i > 6:
        M_temp = M ** 2 - M
        m = M_temp.max() - M_temp.min()
        if abs(m) < 1e-8:
            return True
    return False
3fc6cec40db8e52aed6e435a5552a8cf8edb68a1
702,222
def split_text(text, sep='\n'):
    """Split text."""
    if isinstance(text, bytes):
        text = text.decode("utf8")
    return [elm.strip() for elm in text.split(sep) if elm.strip()]
5247919ab151b2e4d1b4e631046d7030e673a68a
702,228
import re


def replace_image_link(target_str):
    """
    Replace the shorthand of an image link { image.jpg } with the full link
    {{ img_tag("image.jpg") | safe }}

    :param target_str: String with images in it to be edited
    :return: string with images formatted as {{ img_tag("image.jpg") | safe }}
    """
    # Replace each {image.jpg} / { image.jpg } shorthand in a single pass,
    # matching the braces themselves so that bare mentions of the file name
    # elsewhere in the text are never touched (the original str.replace()
    # approach could corrupt those).
    return re.sub(
        r'{\s*(\w+\.\w+)\s*}',
        lambda m: f'{{{{ img_tag("{m.group(1)}") | safe }}}}',
        target_str)
d72cbaacecec7d2654a20f50098f21059130dbc3
702,230
def create_option(option, display, window, active=True):
    """
    Returns an option `dict` to be used by lottus

    :param option `str`: the value of the option
    :param display `str`: the value that will be displayed
    :param window `str`: the name of the window that this option points to
    :param active `bool`: indicates whether the option will be shown to the client
    """
    return {
        'option': option,
        'display': display,
        'window': window,
        'active': active
    }
31eb71dee85a7876997d4b41914f974cbcfcf938
702,231
import re


def remove_proximity_around_booleans(query_str):
    """
    Clients like PEP-Web (Gavant) send fulltext1 as a proximity string.
    This removes the proximity if there's a boolean inside.
    We could have the client "not do that", but it's actually easier to remove
    than to parse and add.

    >>> a = '(article_xml:"dog AND cat"~25 AND body:"quick fox"~25) OR title:fox'
    >>> remove_proximity_around_booleans(a)
    '(article_xml:(dog AND cat) AND body:"quick fox"~25) OR title:fox'

    >>> a = 'body_xml:"Even and Attention"~25 && body_xml:tuckett'
    >>> remove_proximity_around_booleans(a)
    'body_xml:"Even and Attention"~25 && body_xml:tuckett'
    """
    srch_ptn = r'\"([A-z\s0-9\!\@\*\~\-\&\|\[\]]+)\"~25'
    changes = False
    while 1:
        m = re.search(srch_ptn, query_str)
        if m is not None:
            # does it have a boolean, a quote, or a bracket (range)?
            # n = re.search(r"\s(AND|OR|NOT|\&\&|\|\|)\s|([\"\[\']])", m.group(1), flags=re.IGNORECASE)
            # 2021-04-01 Booleans must be UPPERCASE now
            n = re.search(r"\s(AND|OR|NOT|\&\&|\|\|)\s|([\"\[\']])", m.group(1))
            # if it's not None, then this is not a proximity match
            if n is not None:
                query_str = re.subn(srch_ptn, r'(\1)', query_str, 1)[0]
            else:
                # change it so it doesn't match next loop iter
                query_str = re.subn(srch_ptn, r'"\1"~26', query_str, 1)[0]
                changes = True
        else:
            if changes:
                # change proximity ranges back
                query_str = re.sub("~26", "~25", query_str)
            break
    return query_str
b89ac8ab52cf00f1902603c38bc3f4fdd47cbda2
702,232
def api_client(application, request):
    """
    Fixture that returns api_client

    Parameters:
        app (Application): Application for which create the client.

    Returns:
        api_client (HttpClient): Api client for application
    """
    def _api_client(app=application, **kwargs):
        client = app.api_client(**kwargs)
        request.addfinalizer(client.close)
        return client

    return _api_client
cf2894c8f8c2adb8a8700dfa1b9f3a99e86909d8
702,234
from functools import reduce


def _is_num_tuple(t, size):
    """Returns: True if t is a sequence of numbers; False otherwise.

    If the sequence is not of the given size, it also returns False.

    Parameter t: The value to test
    Precondition: NONE

    Parameter size: The size of the sequence
    Precondition: size is an int >= 0
    """
    try:
        return len(t) == size and reduce(lambda x, y: x and y,
                                         map(lambda z: type(z) in [int, float], t))
    except TypeError:
        # len() raises TypeError on values that aren't sequences at all.
        return False
64b3795b8e90dc38a7c48cd177d8e2aaffc0aa3d
702,239
def jsonify_dict(d):
    """Turns python booleans into strings so hps dict can be written in json.

    Creates a shallow-copied dictionary first, then accomplishes string
    conversion.

    Args:
        d: hyperparameter dictionary

    Returns:
        hyperparameter dictionary with bool's as strings
    """
    d2 = d.copy()   # shallow copy is fine by assumption of d being shallow

    def jsonify_bool(boolean_value):
        if boolean_value:
            return "true"
        else:
            return "false"

    for key in d2.keys():
        if isinstance(d2[key], bool):
            d2[key] = jsonify_bool(d2[key])
    return d2
afbf5819fc4fda444076562b02deb22f8146f123
702,241
from pathlib import Path


def _load_requirements(requirements_file, folder="requirements"):
    """Load requirements from a file."""
    requirements = []
    with open(Path(folder) / Path(requirements_file), "r") as f:
        for line in f:
            line = line.strip()
            if line and not line.startswith("#"):
                requirements.append(line)
    return requirements
e9d56a025986f9a2899b3d070033abcdeec21956
702,245
def get_emails(notification_rec):
    """
    Get list of emails for users listed in the specified notification
    """
    # Track addresses in a set as there could be duplicate recipients;
    # the list preserves the original ordering.
    seen = set()
    ret = []
    for recipient in notification_rec.recipients.all():
        if recipient.email not in seen:
            seen.add(recipient.email)
            ret.append(recipient.email)
    return ret
9c01b1e5615cf3a35fbda0c4d92a1e092cfc3d59
702,246
import six


def text_type(string, encoding='utf-8'):
    """
    Given text, or bytes as input, return text in both python 2/3

    This is needed because the arguments to six.binary_type and six.text_type
    change based on if you are passing it text or bytes, and if you simply
    pass bytes to six.text_type without an encoding you will get output like:
    ``six.text_type(b'hello-world')`` which is not desirable.
    """
    if isinstance(string, six.text_type):
        return six.text_type(string)
    else:
        return six.text_type(string, encoding)
5b962c348769ccb1029cd0d41fc23ddb6942d37d
702,247
def quote_value(value: str) -> str:
    """
    Ensures values with ";" are quoted.

    >>> quote_value("foo")
    'foo'
    >>> quote_value("foo;bar")
    '"foo;bar"'
    """
    if value.find(";") != -1:
        return f'"{value}"'
    return value
e6bb23a17d554742115582feb90ba621ddd7fc66
702,248
def idx2token(idx, reverse_vocab):
    """
    Look up the token for an index.

    :param idx: index
    :param reverse_vocab: reverse lookup table, @see chatbot.build_vocab
    :return: the token
    """
    return reverse_vocab[idx]
4ce26e6a6a103133ffe0212d01a4c52a8a23479d
702,258
def message_from_lax(data):
    """
    format a message from a Lax response data
    """
    return data.get("message") if data.get("message") else "(empty message)"
81ba7399bc0e3e86ee1967988a17fd7f3524d8ab
702,259
import itertools


def get_n_bits_combinations(num_bits: int) -> list:
    """
    Function returning list containing all combinations of n bits.
    Given num_bits binary bits, each bit has value 0 or 1,
    there are in total 2**n_bits combinations.

    :param num_bits: int, number of combinations to evaluate
    :return: a list of length 2**n_bits,
        return[i] is the binary representation of the decimal integer.

    :Example:
        >>> from deepreg.model.layer_util import get_n_bits_combinations
        >>> get_n_bits_combinations(3)
        [[0, 0, 0],  # 0
         [0, 0, 1],  # 1
         [0, 1, 0],  # 2
         [0, 1, 1],  # 3
         [1, 0, 0],  # 4
         [1, 0, 1],  # 5
         [1, 1, 0],  # 6
         [1, 1, 1]]  # 7
    """
    assert num_bits >= 1
    return [list(i) for i in itertools.product([0, 1], repeat=num_bits)]
6813f76f856a639688d6b80ddce0e605707f8d1f
702,262
import numpy as np


def get_centers(bins):
    """Return the center of the provided bins.

    Example:
    >>> get_centers(bins=np.array([0.0, 1.0, 2.0]))
    array([0.5, 1.5])
    """
    bins = bins.astype(float)
    return (bins[:-1] + bins[1:]) / 2
4f5b3454e1ef718302c7e5ea204954d498ca9e10
702,265
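Bin centers for a slightly irregular grid (hypothetical bin edges):

print(get_centers(np.array([0.0, 1.0, 2.0, 4.0])))  # [0.5 1.5 3. ]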
def hex_16bit(value):
    """Converts 16bit value into bytearray.

    args: 16bit value
    returns: bytearray of size 2
    """
    if value > 0xffff or value < 0:
        raise Exception('Sar file 16bit value %s out of range' % value)
    return value.to_bytes(2, 'little')
1c5aab076798b40459bf5afab73fd92e8dbb93a1
702,266
def transpose(matrix):
    """Transpose a list of lists.

    >>> transpose([['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h', 'i']])
    [['a', 'd', 'g'], ['b', 'e', 'h'], ['c', 'f', 'i']]
    >>> transpose([['a', 'b', 'c'], ['d', 'e', 'f']])
    [['a', 'd'], ['b', 'e'], ['c', 'f']]
    >>> transpose([['a', 'b'], ['d', 'e'], ['g', 'h']])
    [['a', 'd', 'g'], ['b', 'e', 'h']]
    """
    result = zip(*matrix)

    # Convert list of tuples to list of lists. map is faster than a list
    # comprehension since it is being used with a built-in function as an
    # argument, but in Python 3 it must be materialized with list() so the
    # doctests above actually pass.
    result = list(map(list, result))

    return result
e96e7fbd074115a4291cc495c0251d1083f4592e
702,270
import torch


def reduce(tensor: torch.Tensor, reduction: str) -> torch.Tensor:
    """Reduces the given tensor using a specific criterion.

    Args:
        tensor (torch.Tensor): input tensor
        reduction (str): string with fixed values [elementwise_mean, none, sum]

    Raises:
        ValueError: when the reduction is not supported

    Returns:
        torch.Tensor: reduced tensor, or the tensor itself
    """
    if reduction in ("elementwise_mean", "mean"):
        return torch.mean(tensor)
    if reduction == 'sum':
        return torch.sum(tensor)
    if reduction is None or reduction == 'none':
        return tensor
    raise ValueError('Reduction parameter unknown.')
a77edd7f9a8486a8fd604b9a35c2ecfe28d43c8c
702,271
def load_cows(filename):
    """
    Read the contents of the given file.  Assumes the file contents contain
    data in the form of comma-separated cow name, weight pairs, and return a
    dictionary containing cow names as keys and corresponding weights as values.

    Parameters:
        filename - the name of the data file as a string

    Returns:
        a dictionary of cow name (string), weight (int) pairs
    """
    # MY_CODE
    cow_dict = {}
    with open(filename, 'r') as f:
        for line in f:
            name, weight = line.split(',')
            cow_dict[name] = int(weight)
    return cow_dict
aa44df075a4aa8d44d37743b8a351ef57133d148
702,273
def wrap_hashlib(hasher, length=None):
    """
    Wraps hashlib's functions, returning a function that returns the
    hex-digest of its input.

    >>> from hashlib import sha1
    >>> wrap_hashlib(sha1)(b'heyo')
    'f8bb1031d6d82b30817a872b8a2ec31d5380cee5'

    :param hasher: A function from :mod:`hashlib`
    :return: Function
    """
    args = []
    if length is not None:
        args = [length]

    def _hasher(data):
        return hasher(data).hexdigest(*args)

    return _hasher
dbd07d4151a5c5c523fe75c3f29b72abfd15c3b8
702,274
import re


def isBlank(s):
    """ Returns True if string contains only space characters."""
    return bool(re.compile(r"^\s*$").match(s))
1e6f7f7cefa4fea3d5b7443d74a265a79c3db3d7
702,275
def __extract_tzd(m):
    """Return the Time Zone Designator as an offset in seconds from UTC."""
    if not m:
        return 0
    tzd = m.group("tzd")
    if not tzd:
        return 0
    if tzd == "Z":
        return 0

    hours = int(m.group("tzdhours"), 10)
    minutes = m.group("tzdminutes")
    if minutes:
        minutes = int(minutes, 10)
    else:
        minutes = 0
    offset = (hours*60 + minutes) * 60
    if tzd[0] == "+":
        return -offset
    return offset
5e786cab67a2151df8ed8851dc19a6adbc365aea
702,276
def compute_heuristic_conn_4(init_pos, coord):
    """Returns Manhattan heuristic for distance from coord to init_pos

    init_pos - coordinate of position of goal configuration
    coord - coordinate of configuration for which heuristic is being computed

    Returns the heuristic distance to goal through a Manhattan metric
    calculation.
    """
    return sum(map(lambda x, y: abs(x - y), coord, init_pos))
873fcbad5ebadcb8d0f0009c6d3bb615146bab5a
702,279
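For example, the Manhattan distance between (1, 2) and (4, 6) is |1-4| + |2-6| = 7 (hypothetical coordinates):

print(compute_heuristic_conn_4((4, 6), (1, 2)))  # 7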
def frequency(text, char):
    """
    Counts frequency of a character in a string.
    """
    count = 0
    for c in text:
        if c == char:
            count += 1
    return count
5a58161f6aed1f8ba88ed6490891b544b23449cd
702,281
import time


def measure_command(func, kwargs):
    """
    Measures the execution time of a function

    :param func: function
    :param kwargs: dict keyword arguments
    :return: float, result (time, result of function)
    """
    time_start = time.time()
    r = func(**kwargs)
    dt = time.time() - time_start
    return dt, r
33ca8627681b3f32d8d39fc088175a6a38d51097
702,283
def largest_number(seq_seq):
    """
    Returns the largest number in the subsequences of the given
    sequence of sequences.

    Returns None if there are NO numbers in the subsequences.

    For example, if the given argument is:
        [(3, 1, 4), (13, 10, 11, 7, 10), [1, 2, 3, 4]]
    then this function returns 13.

    As another example, if the given argument is:
        ([], [-1111111111111111], [])
    then this function returns -1111111111111111.

    As yet another example, if the given argument is:
        ([], [], [])
    then this function returns None.

    Preconditions:
        :type seq_seq: (list, tuple)
    and the given argument is a sequence of sequences,
    where each subsequence contains only numbers.
    """
    # -------------------------------------------------------------------------
    # DONE: 3. Implement and test this function.
    #    Note that you should write its TEST function first (above).
    # -------------------------------------------------------------------------
    # Use None as the "nothing seen yet" sentinel; the original tracked
    # largest = 0 with a sign-confused comparison, which could prefer a
    # negative over a positive and returned None when the largest value was 0.
    largest = None
    for subsequence in seq_seq:
        for number in subsequence:
            if largest is None or number > largest:
                largest = number
    return largest
5a2418e1f8ee0413e8306a04d3ee17a909b7b0c3
702,295
def __unwrap_nonsense_request(request):
    """
    Unwrap the given "estimate nonsense" request into a string.

    Args:
        request: A JSON-like dict describing an "estimate nonsense" request
            (as described on https://clusterdocs.azurewebsites.net/)

    Returns:
        A string that represents the sentence of which to estimate the
        offensiveness
    """
    # request = json.loads(request)
    sentence = request["sentence"]

    return sentence
793b5b352db1edd31e537e39bd7ac0c3f62e0fc0
702,297
def _clean(target_str: str, is_cellref: bool = False) -> str:
    """Rids a string of its most common problems: spacing, capitalisation, etc."""
    try:
        output_str = target_str.strip()
    except AttributeError:
        raise AttributeError("Cannot clean value other than a string here.")
    if is_cellref:
        output_str = output_str.upper()
    return output_str
778658332059679356c399c7bb5b0c66383650d3
702,298
def get_values(record, tag):
    """Gets values that match |tag| from |record|."""
    keys = [key for key in record.keys() if key[0] == tag]
    return [record[k] for k in sorted(keys)]
7b75e300cbdb5c1840681c78af9adc4dc1f21838
702,299
from typing import List


def expected_value(values: List[float]) -> float:
    """Return the expected value of the input list

    >>> expected_value([1, 2, 3])
    2.0
    """
    return sum(values) / len(values)
b856157d21bd8a82813bfb8ae39c4c5a1f3aef53
702,300
def is_valid_gadget(gadget, bad_chars):
    """Determine if a gadget is valid (i.e., contains no bad characters).

    Args:
        gadget (Gadget): A namedtuple-like object with `shellcode` and `asm`
            fields.
        bad_chars (bytearray): The bad characters not allowed to be present.

    Returns:
        bool: Whether the specified gadget is acceptable.
    """
    gadget_chars = bytearray(gadget.shellcode)
    for bc in bad_chars:
        if bc in gadget_chars:
            return False
    return True
00189e08120e377ec873aa4267f3240d72943966
702,308
def month(dt):
    """ For a given datetime, return the matching first-day-of-month date. """
    return dt.date().replace(day=1)
480fcdfd7a69f95aa071e2061efc4740802d72d6
702,309
def count_frequency(df, col):
    """Count the number of occurrences of each value in a column."""
    df['Freq'] = df.groupby(col)[col].transform('count')
    return df
28f502d79bacaba474c6df8b642f8e3f7875d1f3
702,310
def extract_tty_phone(service_center):
    """
    Extract a TTY phone number if one exists from the service_center entry in
    the YAML.
    """
    tty_phones = [p for p in service_center['phone'] if 'TTY' in p]
    if len(tty_phones) > 0:
        return tty_phones[0]
9c4d349e0c75b1d75f69cb7758a3aac7dca5b5a5
702,311
from pathlib import Path


def get_data_dir() -> str:
    """Get a filepath to the drem data directory.

    Returns:
        str: Filepath to the drem data directory
    """
    cwd = Path(__file__)
    base_dir = cwd.resolve().parents[3]
    data_dir = base_dir / "data"

    return str(data_dir)
16e1bf3d8eab4c4f58ec90f832fd15809bdbbbca
702,312
def sentinel(name):
    """Create a unique, one-off object with a useful repr"""
    def __repr__(_):
        return f'<{name}>'
    return type(name, (), {'__repr__': __repr__})()
6abe44a5b72bf7d5685c3d7ca97caf0b9c2ee49b
702,316
import math

import torch


def wrap_phi_to_2pi_torch(x):
    """Shift input angle x to the range of [-pi, pi]"""
    pi = math.pi
    x = torch.fmod(2 * pi + torch.fmod(x + pi, 2 * pi), 2 * pi) - pi
    return x
0905134f4cce5aae13f91e9e7900dd053a5861aa
702,318
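Wrapping 3π/2 into the target range gives -π/2 (a quick check, assuming PyTorch is installed):

print(wrap_phi_to_2pi_torch(torch.tensor(3 * math.pi / 2)))  # tensor(-1.5708)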
def FixAbsolutePathInLine(line, relative_paths):
    """Fix absolute paths present in |line| to relative paths."""
    absolute_path = line.split(':')[0]
    relative_path = relative_paths.get(absolute_path, absolute_path)
    if absolute_path == relative_path:
        return line
    return relative_path + line[len(absolute_path):]
e0db0d2f3a0973b4db5f3e551f164481861c0b56
702,319
def lazyprop(func):
    """Wraps a property so it is lazily evaluated.

    Args:
        func: The property to wrap.

    Returns:
        A property that only does computation the first time it is called.
    """
    attr_name = '_lazy_' + func.__name__

    @property
    def _lazyprop(self):
        """A lazily evaluated property."""
        if not hasattr(self, attr_name):
            setattr(self, attr_name, func(self))
        return getattr(self, attr_name)

    return _lazyprop
b14f82b196177be207923744dda695d6aa70248f
702,321
def simpleCollision(spriteOne, spriteTwo):
    """
    Simple bounding box collision detection.
    """
    widthSpriteOne, heightSpriteOne = spriteOne.image.get_size()
    rectSpriteOne = spriteOne.image.get_rect().move(
        spriteOne.pos.x - widthSpriteOne / 2,
        spriteOne.pos.y - heightSpriteOne / 2)

    widthSpriteTwo, heightSpriteTwo = spriteTwo.image.get_size()
    rectSpriteTwo = spriteTwo.image.get_rect().move(
        spriteTwo.pos.x - widthSpriteTwo / 2,
        spriteTwo.pos.y - heightSpriteTwo / 2)

    return rectSpriteOne.colliderect(rectSpriteTwo)
b6084ad260e084effb11701a6b859e0a58c9d19b
702,325
import numbers
import json
import base64


def encode(d):
    """Encode an object in a way that can be transported via Mesos attributes:
    first to JSON, then to base64url. The JSON string is padded with spaces so
    that the base64 string has no = pad characters, which are outside the
    legal set for Mesos.
    """
    if isinstance(d, numbers.Real) and not isinstance(d, bool):
        return repr(float(d))
    else:
        s = json.dumps(d, sort_keys=True)
        while len(s) % 3:
            s += ' '
        return base64.urlsafe_b64encode(s.encode('utf-8')).decode('ascii')
b1018b9eba2f136f9281dcb936d0903290abd505
702,327
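Two illustrative calls (hypothetical values): reals pass through as plain reprs, while other JSON-serializable objects become padding-free base64url text:

print(encode(3.5))            # 3.5
print(encode({'a': [1, 2]}))  # base64url text with no '=' padding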
def __read_dataset_item(path):
    """Reads data set from path returns a movie dict.

    Parameters
    ----------
    path : str
        Absolute path of the MovieLens data set (u.data).

    Returns
    -------
    rating_dict : dict
        Returns a dict of users, movies and ratings.
        { string:movie = { string:user = int:rating } }
    """
    rating_dict = {}
    with open(path, "r") as reader:
        lines = reader.readlines()
        for line in lines:
            x = line.split(sep="\t")
            user = x[0]
            movie = x[1]
            rating = int(x[2])
            # Both branches of the original if/else assigned the same value,
            # so just create the inner dict on first sight of the movie.
            if movie not in rating_dict:
                rating_dict[movie] = dict()
            rating_dict[movie][user] = rating
    return rating_dict
a87baecc5b0c28675bc715aab646bf41e4b40f96
702,329
import torch


def query_ball_point(radius, nsample, xyz, new_xyz):
    """
    Input:
        radius: local region radius
        nsample: max sample number in local region
        xyz: all points, [B, N, 3]
        new_xyz: query points, [B, S, 3]
    Return:
        group_idx: grouped points index, [B, S, nsample]
    """
    device = xyz.device
    B, N, C = xyz.shape
    _, S, _ = new_xyz.shape
    group_idx = torch.arange(N, dtype=torch.long).to(device).view(1, 1, N).repeat([B, S, 1])
    dists = torch.cdist(new_xyz, xyz)
    if radius is not None:
        group_idx[dists > radius] = N
    group_idx = group_idx.sort(dim=-1)[0][:, :, :nsample]
    group_first = group_idx[:, :, 0].view(B, S, 1).repeat([1, 1, nsample])
    mask = group_idx == N
    group_idx[mask] = group_first[mask]
    return group_idx
e74992747103d11b6618ecf7daf035c4f83e9911
702,331