content (string, lengths 35 to 416k) | sha1 (string, length 40) | id (int64, 0 to 710k)
def access_bit(data, num):
    """Return the bit at position `num` of a bytes array."""
    base = int(num // 8)
    shift = 7 - int(num % 8)
    return (data[base] & (1 << shift)) >> shift
fed874d0d7703c9e697da86c5a5832d20b46ebe5
704,105
def merge_set_if_true(set_1, set_2):
    """Merge two sets if both are truthy; otherwise return whichever exists.

    :return: New Set
    """
    if set_1 and set_2:
        return set_1.from_merge(set_1, set_2)
    elif set_1 and not set_2:
        return set_1
    elif set_2 and not set_1:
        return set_2
    else:
        return None
833e6925ef2b3f70160238cdc32516be2482082d
704,106
import pytz


def localtime(utc_dt, tz_str):
    """
    Convert utc datetime to local timezone datetime
    :param utc_dt: datetime, utc
    :param tz_str: str, pytz e.g. 'US/Eastern'
    :return: datetime, in timezone of tz
    """
    tz = pytz.timezone(tz_str)
    local_dt = tz.normalize(utc_dt.astimezone(tz))
    return local_dt
f48844c72895813fdcd3913cfe7de0e6f6d0ac3c
704,107
def is_url(url):
    """Check URL format."""
    return url.startswith("https://") or url.startswith("http://")
bc8f59d2e96e0a625317e86216b9b93077bbf8e2
704,108
import re


def _preclean(Q):
    """Clean before annotation."""
    Q = re.sub('#([0-9])', r'# \1', Q)
    Q = (Q.replace('€', ' €')
         .replace('\'', ' ')
         .replace(',', '')
         .replace('?', '')
         .replace('\"', '')
         .replace('(s)', '')
         .replace('  ', ' ')
         .replace(u'\xa0', u' '))
    return Q.lower()
115828037b884108b9e3324337874c6b23dc066c
704,109
def HTTP405(environ, start_response):
    """HTTP 405 Response"""
    start_response('405 METHOD NOT ALLOWED', [('Content-Type', 'text/plain')])
    return ['']
f07522ac904ec5ab1367ef42eb5afe8a2f0d1fce
704,110
import sys
import os


def getProgramName():
    """Get the name of the currently running program."""
    progName = sys.argv[0].strip()
    if progName.startswith('./'):
        progName = progName[2:]
    if progName.endswith('.py'):
        progName = progName[:-3]
    # Only return the name of the program not the path
    pName = os.path.split(progName)[-1]
    if pName.endswith('.exe'):
        pName = pName[:-4]
    return pName
486c454756dea87b7b422fb00e80e1346183d9d2
704,111
def get_scenario_data():
    """Return sample scenario_data"""
    return [
        {
            'population_count': 100,
            'county': 'oxford',
            'season': 'cold_month',
            'timestep': 2017
        },
        {
            'population_count': 150,
            'county': 'oxford',
            'season': 'spring_month',
            'timestep': 2017
        },
        {
            'population_count': 200,
            'county': 'oxford',
            'season': 'hot_month',
            'timestep': 2017
        },
        {
            'population_count': 210,
            'county': 'oxford',
            'season': 'fall_month',
            'timestep': 2017
        },
    ]
b66ba716e6bd33e1a0ff80735acb64041663ed99
704,112
import os


def add_it(workbench, file_list, labels):
    """Add the given file_list to workbench as samples, also add them as nodes.

    Args:
        workbench: Instance of Workbench Client.
        file_list: list of files.
        labels: labels for the nodes.

    Returns:
        A list of md5s.
    """
    md5s = []
    for filename in file_list:
        if filename != '.DS_Store':
            with open(filename, 'rb') as pe_file:
                base_name = os.path.basename(filename)
                md5 = workbench.store_sample(pe_file.read(), base_name, 'exe')
                workbench.add_node(md5, md5[:6], labels)
                md5s.append(md5)
    return md5s
88e85b8bcc2fdbf6fdac64b9b4e84d82e7ea3185
704,113
def get_percentage(numerator, denominator, precision=2):
    """Return a percentage value with the specified precision."""
    return round(float(numerator) / float(denominator) * 100, precision)
7104f6bf2d88f9081913ec3fbae596254cdcc878
704,114
def _calculate_verification_code(hash: bytes) -> int:
    """
    Verification code is a 4-digit number used in mobile authentication
    and mobile signing, linked with the hash value to be signed.

    See https://github.com/SK-EID/MID#241-verification-code-calculation-algorithm
    """
    return ((0xFC & hash[0]) << 5) | (hash[-1] & 0x7F)
173f9653f9914672160fb263a04fff7130ddf687
704,115
import argparse


def parse_script_args():
    """Parse command-line arguments."""
    parser = argparse.ArgumentParser(description="Delete images from filesystem.")
    parser.add_argument('--reg_ip', type=str, required=True,
                        help='Registry host address e.g. 1.2.3.4')
    parser.add_argument('images', type=str, nargs='+',
                        help='Images to delete in the form repo:tag, e.g. centos:latest')
    args = parser.parse_args()
    for image in args.images:
        if image.count(':') != 1:
            parser.error("Malformed image(s)")
    return args
fc77674d45f22febcb93b6bb0317cb5a0ef18e0f
704,116
def suggest_parameters_DRE_NMNIST(trial, list_lr, list_bs, list_opt,
                                  list_wd, list_multLam, list_order):
    """Suggest hyperparameters.

    Args:
        trial: A trial object for optuna optimization.
        list_lr: A list of floats. Candidates of learning rates.
        list_bs: A list of ints. Candidates of batch sizes.
        list_opt: A list of strings. Candidates of optimizers.
        list_wd: A list of floats. weight decay
        list_multLam: A list of floats. Prefactor of the second term of BARR.
        list_order: A list of integers. Order of SPRT-TANDEM.

    Returns:
        learning_rate: A float.
        batch_size: An int.
        name_optimizer: A string.
        weight_decay: A float.
        param_multLam: A float.
        order_sprt: An int.
    """
    # yaml load interprets, e.g., 1e-2 as a string...
    for iter_idx in range(len(list_lr)):
        list_lr[iter_idx] = float(list_lr[iter_idx])

    learning_rate = trial.suggest_categorical('learning_rate', list_lr)
    batch_size = trial.suggest_categorical('batch_size', list_bs)
    name_optimizer = trial.suggest_categorical('optimizer', list_opt)
    weight_decay = trial.suggest_categorical('weight_decay', list_wd)
    param_multLam = trial.suggest_categorical('param_multLam', list_multLam)
    order_sprt = trial.suggest_categorical('order_sprt', list_order)

    return learning_rate, batch_size, name_optimizer,\
        weight_decay, param_multLam, order_sprt
627855f5fe8fd15d43cc7c8ca3da22b704b5907e
704,119
def format_seconds(seconds, hide_seconds=False):
    """
    Returns a human-readable string representation of the given amount
    of seconds.
    """
    if seconds <= 60:
        return str(seconds)
    output = ""
    for period, period_seconds in (
            ('y', 31557600),
            ('d', 86400),
            ('h', 3600),
            ('m', 60),
            ('s', 1),
    ):
        if seconds >= period_seconds and not (hide_seconds and period == 's'):
            output += str(int(seconds / period_seconds))
            output += period
            output += " "
            seconds = seconds % period_seconds
    return output.strip()
341ab077b9f83a91e89a4b96cb16410efab90c1c
704,120
def _contains_atom(example, atoms, get_atoms_fn):
    """Returns True if example contains any atom in atoms."""
    example_atoms = get_atoms_fn(example)
    for example_atom in example_atoms:
        if example_atom in atoms:
            return True
    return False
c9e60d956585c185f9fb62cc0d11f169e6b79f88
704,121
def get_diff_level(files):
    """Return the lowest hierarchical file parts level at which there are
    differences among file paths."""
    for i, parts in enumerate(zip(*[f.parts for f in files])):
        if len(set(parts)) > 1:
            return i
c9c3f774712684c6817c8bb5b3bf9c101e1df8fa
704,122
def get_max(list_tuples):
    """
    Returns from a list a tuple which has the highest value as first element.
    If empty, it returns -2's
    """
    if len(list_tuples) == 0:
        return (-2, -2, -2, -2)
    # evaluate the max result
    found = max(tup[0] for tup in list_tuples)
    for result in list_tuples:
        if result[0] == found:
            return result
91c662d5865de346a1ac73025ced78a996077111
704,123
def get_context(file_in):
    """Get genomic context from bed file"""
    output_dict = {}
    handle = open(file_in, 'r')
    header = handle.readline().rstrip('\n').split('\t')
    for line in handle:
        split_line = line.rstrip('\n').split('\t')
        contig, pos, context = split_line[:3]
        if context == '.':
            continue
        try:
            output_dict[contig][pos] = context
        except KeyError:
            output_dict[contig] = {pos: context}
    return output_dict
90975b6eb929c546372fdce0eb449f455e9ffc18
704,124
import six


def remove_nulls_from_dict(d):
    """
    remove_nulls_from_dict function recursively removes empty or null values
    from a dictionary and embedded lists of dictionaries
    """
    if isinstance(d, dict):
        return {k: remove_nulls_from_dict(v) for k, v in six.iteritems(d) if v}
    if isinstance(d, list):
        return [remove_nulls_from_dict(entry) for entry in d if entry]
    else:
        return d
dd0da02eae06ceccc1347e6ac87dcb65bdc44126
704,125
import subprocess


def ensure_installed(tool):
    """
    Checks if a given tool is installed and in PATH
    :param tool: Tool to check if installed and in PATH
    :return: Full path of the tool
    """
    proc = subprocess.Popen(
        'export PATH=$PATH:/Applications/STMicroelectronics/STM32CubeMX.app/Contents/MacOs/'
        ':/usr/local/opt/arm-none-eabi-llvm/bin/ && which ' + tool,
        stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    out, err = proc.communicate()
    exitcode = proc.returncode
    if exitcode == 0:
        print('Found {t} install in "{p}"'.format(t=tool, p=out.strip()))
        return out.strip()
    else:
        print(tool + ' is not installed (or is not in the PATH).')
        return out.strip()
6d77c686f679b693d2c29fa9750094573675a6d5
704,128
import random


def is_zero(n, p=.5):
    """Return the sum of n random (-1, 1) variables divided by n.

    n: number of numbers to sum
    p: probability of 1 (probability of -1 is 1-p)
    """
    # This function should be about zero, but as n increases it gets better
    numbers = random.choices((-1, 1), weights=(1 - p, p), k=n)
    return sum(numbers) / n
eae43e784d77cb50e1b0b48f6d32d5085164147d
704,129
def safe_divide(num, denom):
    """Divides the two numbers, avoiding ZeroDivisionError.

    Args:
        num: numerator
        denom: denominator

    Returns:
        the quotient, or 0 if the denominator is 0
    """
    try:
        return num / denom
    except ZeroDivisionError:
        return 0
144cf6bf8b53ab43f3ab2e16e7dd2c95f5408035
704,130
def route_distance(df, con):
    """
    Given a route's dataframe determine total distance (m) using gid
    """
    dist = 0
    cur = con.cursor()
    for edge in df.edge[0:-1]:
        query = 'SELECT length_m FROM ways WHERE gid={0}'.format(edge)
        cur.execute(query)
        out = cur.fetchone()
        dist += out[0]
    return dist
ca83cca270a97b60b615f89c83541c0491abddcf
704,131
def convert_units(P, In='cm', Out='m'):
    """
    Quickly convert distance units between meters, centimeters and millimeters
    """
    c = {'m': {'mm': 1000., 'cm': 100., 'm': 1.},
         'cm': {'mm': 10., 'cm': 1., 'm': 0.01},
         'mm': {'mm': 1., 'cm': 0.1, 'm': 0.001}}
    return c[In][Out] * P
bc318011ffc71d575c7e7276c2dede467a84dc2c
704,132
def F16(x):
    """Rosenbrock function"""
    total = 0
    for i in range(len(x) - 1):
        # Note: the classical Rosenbrock penalty is (1 - x[i])**2; this
        # variant uses (x[i] + 1)**2, which shifts the minimum to x_i = -1.
        total += 100 * (x[i + 1] - x[i] ** 2) ** 2 + (x[i] + 1) ** 2
    return total
7421ad45568a8b86aff41fc5c8466ae6ce7aeb9d
704,134
def from_package_str(item):
    """Display namespace info when it is different than the diagram's or
    parent's namespace."""
    subject = item.subject
    diagram = item.diagram
    if not (subject and diagram):
        return False
    namespace = subject.namespace
    parent = item.parent
    # if there is a parent (i.e. interaction)
    if parent and parent.subject and parent.subject.namespace is not namespace:
        return False
    return f"(from {namespace.name})" if namespace is not item.diagram.owner else ""
ab89c199aa886bff0ec88f6df37a655bb9ee7596
704,135
import subprocess


def get_m():
    """Fetch the encryption parameter m."""
    return subprocess.check_output(
        ['node', 'scrapy_ddiy/scripts/js/yuanrenxue/002.js']
    ).decode().strip()
e079affe696805ba2f006e55da9a4abb90f53220
704,136
def div_up(a, b):
    """Return the upper bound of a divide operation."""
    return (a + b - 1) // b
e297f2d08972ebc667d1f3eadca25ef885ef5453
704,137
def solar_elevation_angle(solar_zenith_angle):
    """Returns the solar elevation angle in degrees, given the solar zenith
    angle, solar_zenith_angle."""
    solar_elevation_angle = 90 - solar_zenith_angle
    return solar_elevation_angle
f896c5d0608171f3e5bd37cede1965fe57846d07
704,139
def choose_line(path_with_lines):
    """
    Choose one line for each station in path_with_lines list

    Args:
        path_with_lines(list): A list of dictionaries of stations and lines
    Returns:
        final_path(list): A list of dictionaries of station, line, and token
    """
    final_path = []
    i = 0
    end = len(path_with_lines) - 1
    token = 0
    while i < end:
        if len(path_with_lines[i]['Line']) == 1:
            path_with_lines[i]['token'] = token
            final_path.append(path_with_lines[i])
            i += 1
        else:
            for line in path_with_lines[i]['Line']:
                for next_line in path_with_lines[i + 1]['Line']:
                    if line == next_line:
                        new_dict = {'Station': path_with_lines[i]['Station'],
                                    'Line': [line],
                                    'token': token}
                        final_path.append(new_dict)
                        break
                break
            i += 1
    end_fin = len(final_path)
    if len(path_with_lines[end]) == 1:
        final_path.append(path_with_lines[end])
    else:
        new_dict = {'Station': path_with_lines[end]['Station'],
                    'Line': final_path[end_fin - 1]['Line'],
                    'token': token}
        final_path.append(new_dict)
    i = 0
    while i < end_fin:
        if final_path[i]['Line'] != final_path[i + 1]['Line']:
            final_path[i]['token'] = 1
        i += 1
    return final_path
7548d49a652f1cc2d4c75cee06ed63099713e733
704,140
import sys


def get_vocabulary(fobj, threshold):
    """Read text and return dictionary that encodes vocabulary
    """
    p_dict = dict()
    add_c = 0
    for line in fobj:
        phrase = line.strip('\r\n ').split(' ||| ')
        src_list = phrase[0].split(' ')
        trg_list = phrase[1].split(' ')
        if len(src_list) == 1 or len(trg_list) == 1:
            # do not use phrases of length 1
            continue
        elif len(src_list) == len(trg_list) and len(trg_list) > 1 and (
                src_list[0] == trg_list[0] or src_list[-1] == trg_list[-1]):
            # if the lengths are equal, allow when the first or last tokens match
            pass
        elif not (src_list[0] == trg_list[0] and src_list[-1] == trg_list[-1]):
            # (when the lengths differ) allow only if both the first and last tokens match
            continue
        p_src = phrase[0].strip('\r\n ')  # .split()
        p_trg = phrase[1].strip('\r\n ')  # .split()
        count = int(phrase[-1])
        if p_trg not in p_dict:
            p_dict[p_trg] = []
        if not (count < threshold):
            p_dict[p_trg].append((p_src, count))
            add_c += 1
        p = ""
        for w in trg_list[::-1]:
            p = w + " " + p if p != "" else w
            if p not in p_dict:
                p_dict[p] = []
    sys.stderr.write('vocab Done len={} add_c={}\n'.format(len(p_dict), add_c))
    return p_dict
7ed39da7e652c3108b5f27a021d19331104ee3e8
704,141
def parse_ucsc_file_index(stream, base_url):
    """Turn a UCSC DCC files.txt index into a dictionary of name-value pairs
    """
    file_index = {}
    for line in stream:
        filename, attribute_line = line.split('\t')
        filename = base_url + filename
        attributes = {}
        for assignment in attribute_line.split(';'):
            name, value = assignment.split('=')
            attributes[name.strip()] = value.strip()
        file_index[filename] = attributes
    return file_index
2d74bae9c7f2584ff8d859c8d2781faa3f6631b5
704,142
def update_internal_subnets(
    self,
    ipv4_list: list = [
        "10.0.0.0/8",
        "172.16.0.0/12",
        "192.168.0.0/16",
        "169.254.0.0/16",
        "224.0.0.0/4",
    ],
    ipv6_list: list = [],
    segment_ipv4_list: list = [],
    non_default_routes: bool = False,
) -> bool:
    """Update the list of internal subnets to use to classify internet traffic.

    .. list-table::
        :header-rows: 1

        * - Swagger Section
          - Method
          - Endpoint
        * - internalSubnets
          - POST
          - /gms/internalSubnets2

    Any traffic not matching the internal subnets will be classified as
    internet traffic. This list will be pushed to all appliances. User
    can configure up to 512 subnets in each ipv4 and ipv6 entry.

    .. warning::
        This will overwrite current subnets!

    :param ipv4_list: List of ipv4 networks in CIDR format for all VRFs,
        defaults to ["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16",
        "169.254.0.0/16", "224.0.0.0/4"]
    :type ipv4_list: list, optional
    :param ipv6_list: List of ipv6 networks in CIDR format, defaults to []
    :type ipv6_list: list, optional
    :param segment_ipv4_list: List of ipv4 networks each prefaced with
        related VRF id #, e.g. For VRF 1 only ["1:192.168.0.0/16"],
        defaults to []
    :type segment_ipv4_list: list, optional
    :param non_default_routes: Treat non-default routes as internal
        subnets, defaults to False
    :type non_default_routes: bool, optional
    :return: Returns True/False based on successful call.
    :rtype: bool
    """
    data = {
        "ipv4": ipv4_list,
        "ipv6": ipv6_list,
        "segmentIpv4": segment_ipv4_list,
        "nonDefaultRoutes": non_default_routes,
    }
    return self._post(
        "/gms/internalSubnets2",
        data=data,
        expected_status=[204],
        return_type="bool",
    )
ce1a11f2cbdb01c81fb01a13ba3d73c7ce5d0cf6
704,143
import os


def AbsoluteCanonicalPath(*path):
    """Return the most canonical path Python can provide."""
    file_path = os.path.join(*path)
    return os.path.realpath(os.path.abspath(os.path.expanduser(file_path)))
31c9a4e6a7a52b856b0f7575fc6a231f2887fba5
704,144
from operator import add


def expand_to_point(b1, p1):
    """
    Expand bbox b1 to contain p1: [(x,y),(x,y)]
    """
    for p in p1:
        # Relies on b1's __add__ (e.g. a mapnik Box2d grows to include the
        # given box); with plain tuples this would concatenate instead.
        b1 = add(b1, (p[0], p[1], p[0], p[1]))
    return b1
5a79646403f7f9c2397aadb4f1826d8309eb8dcb
704,145
import collections


def get_closing_bracket(string, indice_inicio):
    """Return the index of the '}' matching the '{' at the given index."""
    if string[indice_inicio] != '{':
        raise ValueError("String invalida")
    deque = collections.deque()
    for atual in range(indice_inicio, len(string)):
        if string[atual] == '}' and string[atual - 1] != '\\':
            deque.popleft()
        elif string[atual] == '{' and string[atual - 1] != '\\':
            deque.append(string[indice_inicio])
        if not deque:
            return atual  # the matching '}' was found
    raise ValueError("String invalida")
5a865de5f5d3589e04f1c1e50f817ec20d8e712f
704,146
def read_matrix():
    """Returns a matrix from the input integers, split by ', '"""
    n = int(input())
    matrix = []
    for _ in range(n):
        row = []
        for x in input().split(', '):
            row.append(int(x))
        matrix.append(row)
    return matrix
7bd1e72fbf6c871324a02b0e11a2f10c2830bed2
704,147
import importlib


def get_class_from_string(class_string: str):
    """Get class or function instance from a string, interpreted as Python module.

    :param class_string:
    :return:
    """
    class_name = class_string.split(".")[-1]
    module = class_string.replace(f".{class_name}", "")
    lib = importlib.import_module(module)
    return getattr(lib, class_name)
5ffb49c23c815b4d3511b93a97a8a9aad4e30adb
704,148
import zlib


def get_hash(value, max_hash):
    """Calculate split hash factor"""
    return zlib.adler32(str(value).encode()) % max_hash + 1
55a703997e4a8bc852def35d0cd418f009998f7e
704,149
def get_at_index(obj, index):
    """Return the list element at the given index.
    The list is indexed 1...n.
    """
    try:
        return obj[index - 1]
    except IndexError:
        return None
8a70a6b7cff6bcaff173a5ebd258d74d271964ca
704,151
def crypto_lettre(dico: dict, lettre: str) -> str:
    """Return a letter encrypted according to the associated dictionary.

    :param dico: substitution dictionary
    :param lettre: an UPPERCASE letter
    :return: the encrypted UPPERCASE letter
    """
    return dico[lettre]
af46af6e3221587731b1c522bf50b8b75563835b
704,152
def check_cells_fit(cell_no, min_cell_distance, space_range=[[0, 10], [0, 10], None]):
    """Given the number of cells (cell_no), the minimal distance between the
    cells, and the space ranges (x, y, z), return True if the cells can fit
    within this range and False if not. If any of the dimensions does not
    exist, pass None."""
    dim1, dim2, dim3 = space_range
    full_dim = 1.
    for dim in [dim1, dim2, dim3]:
        if dim is not None:
            dim = dim[1] - dim[0]
            full_dim = full_dim * dim
    return full_dim / min_cell_distance >= cell_no
b2fa2cd1d7d84d6ef74a408c10293e88299987cf
704,153
def format_fields(field_data, include_empty=True):
    """Format field labels and values.

    Parameters
    ----------
    field_data : |list| of |tuple|
        2-tuples of field labels and values.
    include_empty : |bool|, optional
        Whether fields whose values are |None| or an empty |str|
        should be included in the formatted fields.

    Returns
    -------
    str
        Formatted field labels and values.

    Examples
    --------
    >>> field_data = [('Name', 'Jane'), ('Age', 30), ('DOB', None)]
    >>> format_fields(field_data, include_empty=True)
    Name: Jane
    Age:  30
    DOB:  None

    >>> field_data = [('Name', 'Jane'), ('Age', 30), ('DOB', None)]
    >>> format_fields(field_data, include_empty=False)
    Name: Jane
    Age:  30

    """
    max_label = 0
    for (label, value) in field_data:
        label_length = len(label)
        if label_length > max_label:
            max_label = label_length

    fields = []
    for (label, value) in field_data:
        empty = str(value).strip() in ['', 'None']
        if not empty or include_empty:
            label_length = len(label.strip())
            extra_spaces = ' ' * (max_label - label_length)
            label_sep = ':' + extra_spaces + ' '
            joined_field = label_sep.join([label, str(value)])
            fields.append(joined_field)

    return '\n'.join(fields)
cce4b5279e01c33fec0f83c6f86141c33012fc4c
704,155
import re


def only_bf(txt):
    """Strip a string of all characters except brainfuck chars."""
    # Keep only the eight brainfuck characters: . , < > + - [ ]
    # (the '-' is escaped so that it is not read as a character range,
    # which the original pattern '[^\.,<>\+-\]\[]' accidentally created).
    return re.sub(r"[^.,<>+\-\[\]]", "", txt)
8c32b11d511f5c7b92d7454dcbfea09627ddf172
704,156
import subprocess


def run_hidef_cmd(cmd):
    """
    Runs hidef command as a command line process

    :param cmd: command to run as list
    :type cmd: list
    :return: (return code, standard out, standard error)
    :rtype: tuple
    """
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    out, err = p.communicate()
    return p.returncode, out, err
10ccaefedf262039d2cd7725fc3e8cb0c7f904d6
704,157
import re
import string


def validate_word(word, text):
    """Check if something is a valid "word" submission with previous existing
    text. Return (valid, formatted_word, message), where valid is a boolean,
    formatted_word is the word ready to be added to existing text (adding a
    space if applicable for example), and message is an error message if the
    word was not valid.

    It can be a word, or ?, !, . for now. Can make it a little more
    complicated later."""
    if not text:
        if re.fullmatch(r"[a-zA-Z']+", word):
            return (True, string.capwords(word), "")
        else:
            return (False, "", "Story must begin with a word")
    if word == "":
        return (False, "", "You have to write something!")
    if re.fullmatch(r"[a-zA-Z']+", word):
        if text[-1] in ["?", ".", "!", "\n"]:
            return (True, (' ' + string.capwords(word)), "")
        else:
            return (True, (' ' + word), "")
    if re.fullmatch(r"\-[a-zA-Z']+", word):
        if not text[-1].isalpha():
            return (False, "", "You can only hyphenate after a word.")
        if re.search(r"\-'", word):
            return (False, "", "An apostrophe cannot directly follow a hyphen.")
        else:
            return (True, word, "")
    if re.search(",", word):
        if re.fullmatch(r", [a-zA-Z']+", word):
            if text[-1].isalpha():
                return (True, word, "")
            else:
                return (False, "", "A comma can only come after a word.")
        else:
            return (False, "", "Invalid comma use.")
    if word in ["?", ".", "!"]:
        if text[-1].isalpha():
            return (True, word, "")
        else:
            return (False, "", "Sentence-ending punctuation can only go after a word.")
    if " " in word:
        return (False, "", "Word cannot contain spaces except after a comma.")
    else:
        return (False, "", "Not a valid word for some reason (disallowed characters?)")
658873c8cbf446cbe53ec5f806db668ceecaa2cf
704,158
def process_content_updates(results):
    """Process Content Updates

    Args:
        results (Element): XML results from firewall

    Returns:
        max_app_version (str): A string containing the latest App-ID version
    """
    app_version_list = []
    version_list = results.findall('./result/content-updates/entry')
    for version in version_list:
        app_version = version.find('./version').text
        app_version_list.append(app_version)
    max_app_version = max(app_version_list)
    return max_app_version
021c9ac9246034874a1fe274fb49aabfa0f15d61
704,159
def cell_count(ring):
    """
    >>> cell_count(0) == contour_len(0)
    True
    >>> cell_count(1) == contour_len(0) + contour_len(1)
    True
    >>> cell_count(2) == contour_len(0) + contour_len(1) + contour_len(2)
    True
    >>> cell_count(2)
    25
    >>> cell_count(3)
    49
    """
    if ring == 0:
        return 1
    else:
        return 1 + 4 * (ring + 1) * ring
90eaaaea4544f0db6f3216bea4971ce82004a9c4
704,160
def inverso(x):
    """
    The inverse of a number.

    .. math:: \\frac{1}{x}

    Args:
        x (float): Number to invert.

    Returns:
        float: The inverse.
    """
    return 1 / x
16f2cb9466efa661d3ee8b10b6a0d637273f6b7c
704,161
def update_user_count_eponymous(set_of_contributors, anonymous_coward_comments_counter):
    """
    Eponymous user count update.

    Input:  - set_of_contributors: A python set of user ids.
            - anonymous_coward_comments_counter: The number of comments
              posted by anonymous user(s).

    Output: - user_count: The number of eponymous users active in the
              information cascade.
    """
    user_count = len(set_of_contributors)
    return user_count
4d96d5f22c489a9bae9e0958bd83346df9d60b6c
704,162
import os


def generate_arg_defaults():
    """Return a dict of programmatically determined argument defaults."""
    return {'user': os.getlogin(),
            'uid': os.getuid(),
            'gid': os.getgid(),
            'tag': 'latest',
            'dockerfile': './docker/escadrille'}
5865501d2355c92d0a8447bb36e9afc70ac84d66
704,164
def partition_annots_into_singleton_multiton(ibs, aid_list):
    """
    aid_list = aid_list_
    """
    aids_list = ibs.group_annots_by_name(aid_list)[0]
    singletons = [aids for aids in aids_list if len(aids) == 1]
    multitons = [aids for aids in aids_list if len(aids) > 1]
    return singletons, multitons
7d062644923b12a59ef2e4bffd76ec4caf0bcaeb
704,165
def galeshapley(suitor_pref_dict, reviewer_pref_dict, max_iteration):
    """The Gale-Shapley algorithm. This is known to provide a unique, stable
    suitor-optimal matching. The algorithm is as follows:

    (1) Assign all suitors and reviewers to be unmatched.
    (2) Take any unmatched suitor, s, and their most preferred reviewer, r.
        - If r is unmatched, match s to r.
        - Else, if r is matched, consider their current partner, r_partner.
            - If r prefers s to r_partner, unmatch r_partner from r and
              match s to r.
            - Else, leave s unmatched and remove r from their preference
              list.
    (3) Go to (2) until all suitors are matched, then end.

    Parameters
    ----------
    suitor_pref_dict : dict
        A dictionary with suitors as keys and their respective preference
        lists as values
    reviewer_pref_dict : dict
        A dictionary with reviewers as keys and their respective preference
        lists as values
    max_iteration : int
        An integer as the maximum iterations

    Returns
    -------
    matching : dict
        The suitor-optimal (stable) matching with suitors as keys and the
        reviewer they are matched with as values
    """
    suitors = list(suitor_pref_dict.keys())
    matching = dict()
    rev_matching = dict()

    for i in range(max_iteration):
        if len(suitors) <= 0:
            break
        for s in suitors:
            r = suitor_pref_dict[s][0]
            if r not in matching.values():
                matching[s] = r
                rev_matching[r] = s
            else:
                r_partner = rev_matching.get(r)
                if reviewer_pref_dict[r].index(s) < reviewer_pref_dict[r].index(r_partner):
                    del matching[r_partner]
                    matching[s] = r
                    rev_matching[r] = s
                else:
                    suitor_pref_dict[s].remove(r)
        suitors = list(set(suitor_pref_dict.keys()) - set(matching.keys()))
    return matching
5b52cb165d15a0992b58c38958daf222d8d642cd
704,166
def get_solution(x):
    """
    Args:
        x (numpy.ndarray): binary string as numpy array.

    Returns:
        numpy.ndarray: graph solution as binary numpy array.
    """
    return 1 - x
dd4c92baeaab0d3231f9b24cd950a42d589218aa
704,167
from pathlib import Path


def create_upload_file(tmp_path):
    """Create temporary text file for upload."""
    file_path = Path(tmp_path, "test_upload_1.txt")
    with open(file_path, "w") as f:
        f.write("Hello World")
    return file_path
50b707f59736ae1b1e06018aedec451b578eafc8
704,168
def get_path_up_down(path_source, path_target):
    """Paths for up/down.

    NOTE: both lists always show the LOWER level element, so the path up
    shows the source and the path down shows the target!

    Args:
        path_source(list)
        path_target(list)
    """
    # find common part of path
    path_shared = []
    for pu, pd in zip(path_source, path_target):
        if pu != pd:
            break
        path_shared.append(pu)
    n = len(path_shared)
    # root is always shared
    peak = path_shared[-1]
    path_down = path_target[n:]
    path_up = list(reversed(path_source[n:]))
    return path_up, peak, path_down
ba4719b42e0703ea0ac885de29b36466b7eb3676
704,169
import os


def topic_arn():
    """
    Get the SNS topic ARN from environment variable

    :return: The SNS topic ARN
    """
    return os.environ["SNS_TOPIC_ARN"]
e4729fbb47a4efefb2037dd5e590fba2706e43dc
704,170
def findCentroid(points):
    """
    Compute the centroid for the vectors of a group of ActiveSite instances
    Input: n ActiveSite instances
    Output: the centroid vector
    """
    centroid = [0.0, 0.0, 0.0]
    for item in points:
        centroid = [centroid[0] + item.vector[0],
                    centroid[1] + item.vector[1],
                    centroid[2] + item.vector[2]]
    centroid = [centroid[0] / len(points),
                centroid[1] / len(points),
                centroid[2] / len(points)]
    return centroid
16f0c4b0052edad8c37ca4abee93bff7c1d5937b
704,171
def json_replace(json_obj, **values):
    """
    Search for elements of `{"{{REPLACE_PARAM}}": "some_key"}` and
    replace with the result of `values["some_key"]`.
    """
    if type(json_obj) is list:
        return [json_replace(x, **values) for x in json_obj]
    elif type(json_obj) is dict:
        new = {}
        for key, value in json_obj.items():
            if type(value) is dict and list(value) == ["{{REPLACE_PARAM}}"]:
                param_name = value["{{REPLACE_PARAM}}"]
                new[key] = values[param_name]
            else:
                new[key] = json_replace(value, **values)
        return new
    else:
        return json_obj
f6a8b44b5dd10d37140445b9dc8ebd71107df0a2
704,173
def is_android(filename):
    """Check whether the file is an apk file or not."""
    with open(filename, "rb") as f:
        # AndroidManifest.xml
        if b"AndroidManifest.xml" in f.read(4096):
            return True
    return False
95405710bbf361eef9ddc4dff877745c4c42be02
704,174
def enrich(alert, rules):
    """Determine if an alert meets an enrichment rule

    :param alert: The alert to test
    :param rules: An array of enrichment rules to test against
    :returns: Alert - The enriched Alert object
    """
    for enrichment in rules:
        updates = enrichment(alert)
        if not updates:
            continue
        for name, value in updates.items():
            alert[name] = value
    return alert
97bf2d387e4c6e1ab38628860415bdf83c4634b9
704,175
def re(rm, rf, beta):
    """Returns cost of equity using CAPM formula."""
    return rf + beta * (rm - rf)
5f91fd21ba1833dcb816ac767c8e1a15e2a30a5a
704,176
import socket


def check_tcp_port(host, port, timeout=3):
    """
    Try connecting to a given TCP port.

    :param host: Host to connect to
    :param port: TCP port to connect to
    :param timeout: Connection timeout, in seconds
    :return: True if the port is open, False otherwise.
    """
    s = socket.socket()
    try:
        s.settimeout(timeout)
        s.connect((host, port))
    except socket.error:
        return False
    else:
        s.close()
        return True
5e49ebab2c219e9772174d830dffcb958033befd
704,177
def isolated_margin_account(self, **kwargs):
    """Query Isolated Margin Account Info (USER_DATA)

    GET /sapi/v1/margin/isolated/account

    https://binance-docs.github.io/apidocs/spot/en/#query-isolated-margin-account-info-user_data

    Keyword Args:
        symbols (str, optional): Max 5 symbols can be sent; separated by ",".
            e.g. "BTCUSDT,BNBUSDT,ADAUSDT"
        recvWindow (int, optional): The value cannot be greater than 60000
    """
    return self.sign_request("GET", "/sapi/v1/margin/isolated/account", kwargs)
6109b995f9f64f850816963fa098117f4a4230fd
704,178
def get_sid_trid_combination_score(site_id, tr_ids_list,
                                   idfilt2best_trids_dic):
    """
    Get site ID - transcript ID combination score, based on selected
    transcripts for each of the 10 different filter settings.

    10 transcript quality filter settings:
    EIR EXB TSC ISRN ISR ISRFC SEO FUCO TCOV TSL

    idfilt2best_trids_dic:
        "site_id,filter_id" -> top transcript ID(s) after applying filter
        on exon IDs > min_eir

    >>> site_id = "s1"
    >>> idfilt2best_trids_dic = {"s1,EIR" : ["t1"], "s1,EXB" : ["t1"], "s1,TSC" : ["t1"], "s1,ISRN" : ["t1"], "s1,ISR" : ["t1"], "s1,ISRFC" : ["t1"], "s1,SEO" : ["t1"], "s1,FUCO" : ["t1"], "s1,TCOV" : ["t1"], "s1,TSL" : ["t1"]}
    >>> tr_ids_list = ["t1"]
    >>> get_sid_trid_combination_score(site_id, tr_ids_list, idfilt2best_trids_dic)
    {'t1': 10}
    >>> idfilt2best_trids_dic = {"s1,EIR" : ["t1", "t2"], "s1,EXB" : ["t1", "t2"], "s1,TSC" : ["t1"], "s1,ISRN" : ["t2"], "s1,ISR" : ["t1"], "s1,ISRFC" : ["t1"], "s1,SEO" : ["t1"], "s1,FUCO" : ["t1", "t2"], "s1,TCOV" : ["t1"], "s1,TSL" : ["t2"]}
    >>> tr_ids_list = ["t1", "t2", "t3"]
    >>> get_sid_trid_combination_score(site_id, tr_ids_list, idfilt2best_trids_dic)
    {'t1': 8, 't2': 5, 't3': 0}

    """
    assert tr_ids_list, "tr_ids_list empty"
    filter_ids = ["EIR", "EXB", "TSC", "ISRN", "ISR", "ISRFC",
                  "SEO", "FUCO", "TCOV", "TSL"]
    trid2comb_sc_dic = {}
    for tr_id in tr_ids_list:
        trid2comb_sc_dic[tr_id] = 0
    for tr_id in tr_ids_list:
        for fid in filter_ids:
            sitefiltid = "%s,%s" % (site_id, fid)
            if tr_id in idfilt2best_trids_dic[sitefiltid]:
                trid2comb_sc_dic[tr_id] += 1
    return trid2comb_sc_dic
9cc2d9a0f2fab4e4bf3030ef360b582caeaab45f
704,179
def statusName(dictname):
    """Return the underlying key used for access to the status of the
    dictlist named dictname.
    """
    return (dictname, "S")
77700d17830c1521d543551a380ad611b050bda5
704,180
def ergsperSecondtoLsun(ergss):
    """
    Converts ergs per second to solar luminosity in L_sun.

    :param ergss: ergs per second
    :type ergss: float or ndarray

    :return: luminosity in L_sun
    :rtype: float or ndarray
    """
    return ergss / 3.839e33
806b590c713bc9177db66993aff2f6feaa32d736
704,181
from typing import Any


def produces_record(obj: Any) -> bool:
    """Check if `obj` is annotated to generate records."""
    if hasattr(obj, 'get_data_specs'):
        return True
    else:
        return False
b65ffe3d599963f8f5ee4d1581179ab7567aa074
704,182
import json


def readJson(fname):
    """
    Read json file and load it line-by-line into data
    """
    data = []
    line_num = 0
    with open(fname, encoding="utf-8") as f:
        for line in f:
            line_num += 1
            try:
                data.append(json.loads(line))
            except:
                print("error", line_num)
    return data
0a4a78ce7e36fbc444b27ca6eec3ad5ba582b7cd
704,183
def binary_search(arr, val):
    """
    Summary of binary_search function: searches an input array for a value
    and returns the index of the matching element in the array, or -1 if
    not found.

    Parameters:
        arr (array): An array of values
        val (integer): An integer value

    Returns:
        index (integer): Returns index of array element
    """
    index = -1
    start = 0
    end = len(arr) - 1
    found = False
    while (found == False) and (start <= end):
        # import pdb; pdb.set_trace()
        middle_index = (start + end) // 2
        if val == arr[middle_index]:
            index = middle_index
            found = True
        else:
            # reassign the end and start value excluding the middle index
            if val < arr[middle_index]:
                end = middle_index - 1
            else:
                start = middle_index + 1
    return index
3d5a44b5edce3820d1e669e549f9395d9052d433
704,184
def telegram_settings():
    """Set telegram client configuration."""
    return {
        'result': []
    }
01aff4c347759ca34c609b69b53b4ce4880dc803
704,185
import torch


def compute_jacobian(x, y, structured_tensor=False, retain_graph=False):
    """Compute the Jacobian matrix of output with respect to input.

    If input and/or output have more than one dimension, the Jacobian of the
    flattened output with respect to the flattened input is returned if
    `structured_tensor` is `False`. If `structured_tensor` is `True`, the
    Jacobian is structured in dimensions `[y_shape, flattened_x_shape]`.
    Note that `y_shape` can contain multiple dimensions.

    Args:
        x (list or torch.Tensor): Input tensor or sequence of tensors with
            the parameters to which the Jacobian should be computed.
            Important: the `requires_grad` attribute of input needs to be
            `True` while computing output in the forward pass.
        y (torch.Tensor): Output tensor with the values of which the
            Jacobian is computed.
        structured_tensor (bool): A flag indicating if the Jacobian should be
            structured in a tensor of shape `[y_shape, flattened_x_shape]`
            instead of `[flattened_y_shape, flattened_x_shape]`.

    Returns:
        (torch.Tensor): 2D tensor containing the Jacobian of output with
            respect to input if `structured_tensor` is `False`. If
            `structured_tensor` is `True`, the Jacobian is structured in a
            tensor of shape `[y_shape, flattened_x_shape]`.
    """
    if isinstance(x, torch.Tensor):
        x = [x]

    # Create the empty Jacobian.
    output_flat = y.view(-1)
    numel_input = 0
    for input_tensor in x:
        numel_input += input_tensor.numel()
    jacobian = torch.Tensor(y.numel(), numel_input)

    # Compute the Jacobian.
    for i, output_elem in enumerate(output_flat):
        if i == output_flat.numel() - 1:
            gradients = torch.autograd.grad(output_elem, x,
                                            retain_graph=retain_graph,
                                            create_graph=False,
                                            only_inputs=True)
        else:
            gradients = torch.autograd.grad(output_elem, x,
                                            retain_graph=True,
                                            create_graph=False,
                                            only_inputs=True)
        jacobian_row = torch.cat([g.view(-1).detach() for g in gradients])
        jacobian[i, :] = jacobian_row

    if structured_tensor:
        shape = list(y.shape)
        shape.append(-1)
        jacobian = jacobian.view(shape)
    return jacobian
bd5fd8e3e2b8171680bf059d10fadfe1c39d8899
704,186
def meh(text):
    """
    >>> meh(EXAMPLE_INPUT)
    [3, 8, 9, 1, 2, 5, 4, 6, 7]
    """
    return [int(c) for c in text]
a295b94395f132cf4f8906fb293e9c989da1d7d1
704,187
import base64
import sys
import pickle
import subprocess


def create_standalone_plot(fig, fname, backend=None):
    """
    Create a script which can be executed to plot the given figure.
    Pickles the figure and stores it as string in the script.

    Parameters
    ----------
    fig : matplotlib.figure.Figure
        Matplotlib figure to store.
    fname : str
        File name.
    backend : str or None, optional
        Sets the used backend. Default is None.
        Example: 'Qt5Agg', 'TkAgg'

    Examples
    --------
    Normal

    >>> create_standalone_plot(fig, 'pmf')

    Changing the backend

    >>> create_standalone_plot(fig, 'pmf', backend='Qt5Agg')
    """
    def in_ipynb():
        return 'ipykernel' in sys.modules

    pkl_string = pickle.dumps(fig, protocol=0)
    with open(fname, 'w') as fp:
        fp.write('#!/usr/bin/env python{}.{} \n'.format(sys.version_info.major,
                                                        sys.version_info.minor))
        fp.write('import pickle \n')
        if backend is not None:
            fp.write('import matplotlib \n')
            fp.write('matplotlib.use("{}")\n'.format(backend))
        fp.write('import matplotlib.pyplot as plt \n')
        if sys.version_info.major < 3:
            fp.write("import base64 \n")
            fp.write("pkl_string = b'''{}''' \n".format(base64.b64encode(pkl_string)))
            fp.write('fig = pickle.loads( base64.b64decode(pkl_string) ) \n')
        else:
            fp.write("pkl_string = {} \n".format(pkl_string))
            fp.write('fig = pickle.loads(pkl_string) \n')
        if in_ipynb():
            fp.write('fig._original_dpi = {} \n'.format(fig.get_dpi()))
            fp.write('dummy = plt.figure(figsize={}, dpi={}) \n'.format(
                tuple(fig.get_size_inches()), fig.get_dpi()))
            fp.write('new_manager = dummy.canvas.manager \n')
            fp.write('new_manager.canvas.figure = fig \n')
            fp.write('fig.set_canvas(new_manager.canvas) \n')
        fp.write('plt.show() \n')
    subprocess.Popen("chmod +x {}".format(fname), shell=True)
    print("Created : \033[0;31m{}\033[0m".format(fname))
2fd128d46f1abd89364046444d0ccd99527afb70
704,188
def transform(record):
    """
    Transforms (maps) a record.

    Parameters
    ----------
    record : dict
        The record to transform.

    Returns
    -------
    dict
        The transformed record.
    """
    return {
        record["stakeholder_approach"]: {
            record["stakeholder_id"]: {
                "name": record["stakeholder_name"],
                record["deliverable_id"]: {
                    "name": record["deliverable_name"]
                }
            }
        }
    }
cc9e378c96ee78c46f52184051c3d69568807e0b
704,189
def make_rows(cngrs_prsn):
    """Output a list of dictionaries for each JSON object representing a
    congressperson.

    Each individual dictionary will contain information about the
    congressperson as well as info about their term.
    """
    name = cngrs_prsn["name"]["first"] + " " + cngrs_prsn["name"]["last"]
    birthday = cngrs_prsn["bio"].get("birthday", None)
    gender = cngrs_prsn["bio"]["gender"]
    terms = cngrs_prsn["terms"]
    rows = []
    for t in terms:
        row = {}
        row["name"] = name
        row["birthday"] = birthday
        row["gender"] = gender
        row["term_start"] = t["start"]
        row["term_end"] = t["end"]
        row["term_type"] = t["type"]
        row["party"] = t.get("party")  # Defaults to None
        rows.append(row)
    return rows
a80c55c3db1261a339ec08814c0f532efd35e45a
704,191
def _cons6_77(m6, L66, L67, d_byp, k, Cp, h_byp, dw1, kw1, dw2, kw2,
              adiabatic_duct=False, conv_approx=False):
    """dz constraint for edge bypass sc touching 2 corner bypass sc"""
    term1_out = 0.0
    if not adiabatic_duct:
        if conv_approx:
            R2 = 1 / h_byp + dw2 / 2 / kw2
            term1_out = L66 / m6 / Cp / R2  # conv / cond to duct 2 MW
        else:
            term1_out = h_byp * L66 / m6 / Cp  # conv to outer duct
    if conv_approx:
        R1 = 1 / h_byp + dw1 / 2 / kw1
        term1_in = L66 / m6 / Cp / R1  # conv / cond to duct 1 MW
    else:
        term1_in = h_byp * L66 / m6 / Cp
    term2 = 2 * k * d_byp / m6 / Cp / L67  # cond to adj bypass corner
    return 1 / (term1_in + term1_out + term2)
cedeaf3125454f4b73f082d43eeb7078a4b71412
704,192
def get_list_of_results(results):
    """Modify the outputs so that they are returned in a list format where it
    is sometimes easier to be used by other functions.

    Parameters
    ----------
    results : list
        A list of named tuples for each iteration

    Returns
    -------
    list, list, list, list
        Four lists that include all waits, services, blocks and proportions
        within target of all runs of all individuals
    """
    all_waits = [w.waiting_times for w in results]
    all_services = [s.service_times for s in results]
    all_blocks = [b.blocking_times for b in results]
    all_props = [p.proportion_within_target for p in results]
    return all_waits, all_services, all_blocks, all_props
b5903e3b99aeb37ce90190e86a7cd6e2408ad35b
704,193
def statUniq(passwords, status):
    """Produce data about uniqueness stats."""
    unicity = {"empty": 0, "non empty": 0, "unique": 0}
    unicity['empty'] = passwords[status].count('')
    unicity['non empty'] = len(passwords[status]) - unicity['empty']
    unicity['unique'] = len(set(passwords[status]))
    return unicity
645e20c4dceeb1ee7dee028776709ec739e8a6e0
704,194
import re


def _read_logo(content):
    """
    Read info from logo in file header.
    """
    def _read_logo(pat):
        # Note: this nested helper intentionally shadows the outer name.
        pattern = pat + r":\s+\S+"
        data_str = re.compile(pattern).search(content).group()
        return data_str.split(':')[1].strip()

    info = {}
    for pat in ['Version', 'Website']:
        info[pat] = _read_logo(pat)
    return info
e5ed2adb67c42854a3889dd823de6a3517cf1bad
704,195
def vigenere_decryption(text: str, key: str) -> str:
    """Decode a Vigenère cipher.

    :param text: text to decrypt
    :type text: str
    :param key: the key
    :type key: str
    :return: the original text
    :rtype: str
    """
    result = ''.join(
        [chr((ord(m) - ord(key[i % len(key)]) + 26) % 26 + 97)
         for i, m in enumerate(text)]
    )
    return result
6ad2277d1060eab48481749023e40e11eb3590ea
704,196
import re


def strip_emails(s):
    """
    Remove email addresses from `s` using `RE_EMAILS`.
    """
    RE_EMAILS = re.compile(r"\S*@\S*\s?", re.UNICODE)
    return RE_EMAILS.sub("", s)
7c9a705023f1d5d821d002815f629bd7ebff8602
704,197
def get_entity_description(entity):
    """
    Maps a standard named-entity label from entity extraction, returning
    its explicit Portuguese equivalent.

    param : entity : <str>
    return : <str>
    """
    ent_map = {
        'PERSON': 'pessoa',
        'PER': 'pessoa',
        'NORP': 'nacionalidade ou grupos religiosos/políticos.',
        'FAC': 'prédios, estradas, aeroportos, pontes...',
        'ORG': 'empresas, agências, instituições...',
        'GPE': 'países, cidades, estados.',
        'LOC': 'Locais sem classificação geopolitica.',
        'PRODUCT': 'objetos, veículos, alimentos...',
        'EVENT': 'batalhas, guerras, eventos esportivos...',
        'WORK_OF_ART': 'títulos de livros, canções...',
        'LAW': 'documentos nomeados que virarm leis.',
        'LANGUAGE': 'idioma',
        'DATE': 'datas ou períodos absolutos ou relativos.',
        'TIME': 'períodos de tempo menores que um dia.',
        'PERCENT': 'percentual.',
        'MONEY': 'valores monetários.',
        'QUANTITY': 'medidas.',
        'ORDINAL': 'primeiro, segundo, terceiro...',
        'CARDINAIS': 'outros numerais.'  # note: spaCy's label is 'CARDINAL'
    }
    return ent_map.get(entity, entity)
21fe671419ba00436070ec49cc0433fabfb0c597
704,198
def return_last(responses):
    """Return last item of a list."""
    return responses[-1]
f4aedfe0b10adcdb859ac1d0f5809ca666abac80
704,199
def get_boundary_cell_count(plate_dims, exclude_outer=1):
    """Get number of wells in outer or inner edges

    Parameters
    ----------
    plate_dims : array
        dimensions of plate
    exclude_outer : int
        1 counts only the outermost edge; 2 also adds the next inner ring

    Returns
    -------
    boundary_cell_count : int
        number of wells in the edges
    """
    boundary_cell_count = 2 * (plate_dims[0] + plate_dims[1] - 2)
    if exclude_outer == 2:
        boundary_cell_count += 2 * (plate_dims[0] - 2 + plate_dims[1] - 2 - 2)
    return boundary_cell_count
8e5056af647f893854bab3de3e6e5038c0d703e1
704,201
def compute_border_indices(log2_T, J, i0, i1):
    """
    Computes border indices at all scales which correspond to the original
    signal boundaries after padding.

    At the finest resolution, original_signal = padded_signal[..., i0:i1].
    This function finds the integers i0, i1 for all temporal subsamplings
    by 2**J, being conservative on the indices.

    Maximal subsampling is by `2**log2_T` if `average=True`, else by
    `2**max(log2_T, J)`. We compute indices up to latter to be sure.

    Parameters
    ----------
    log2_T : int
        Maximal subsampling by low-pass filtering is `2**log2_T`.
    J : int / tuple[int]
        Maximal subsampling by band-pass filtering is `2**J`.
    i0 : int
        start index of the original signal at the finest resolution
    i1 : int
        end index (excluded) of the original signal at the finest resolution

    Returns
    -------
    ind_start, ind_end : dictionaries with keys in [0, ..., log2_T] such that
        the original signal is in padded_signal[ind_start[j]:ind_end[j]]
        after subsampling by 2**j

    References
    ----------
    This is a modification of
    https://github.com/kymatio/kymatio/blob/master/kymatio/scattering1d/utils.py
    Kymatio, (C) 2018-present. The Kymatio developers.
    """
    if isinstance(J, tuple):
        J = max(J)
    ind_start = {0: i0}
    ind_end = {0: i1}
    for j in range(1, max(log2_T, J) + 1):
        ind_start[j] = (ind_start[j - 1] // 2) + (ind_start[j - 1] % 2)
        ind_end[j] = (ind_end[j - 1] // 2) + (ind_end[j - 1] % 2)
    return ind_start, ind_end
09d29c4de2c808a1947d513580817bda16a6bfe7
704,202
def isbuffer(obj) -> bool:
    """
    Test whether `obj` is an object that supports the buffer API,
    like a bytes or bytearray object.
    """
    try:
        with memoryview(obj):
            return True
    except TypeError:
        return False
bede4ffeb154e765c7c2f4dea3bfa77281b313f2
704,203
def yes_maybe_condition_true(x: dict) -> bool:
    """
    The yes maybe condition is true if 35% or
    2 (or more) out of 3 users
    2 (or more) out of 4 users
    2 (or more) out of 5 users
    have classified as 'yes' or 'maybe'
    """
    if x["yes_share"] + x["maybe_share"] > 0.35:
        return True
    else:
        return False
3009f2fdb6bdec69ab7f7530d47d40e1f493f8ba
704,204
from pathlib import Path
from typing import Dict


def git_named_refs(git_hash: str, git_dir: Path) -> Dict[str, str]:
    """
    Returns all named tags or references for the provided hash, plus the
    hash itself. This method does not need nor uses a git client
    installation.
    """
    refs = dict(hash=git_hash)
    ref_dir = git_dir / 'refs'
    for item in ref_dir.glob('**/*'):
        if item.is_file() and git_hash == item.read_text(encoding='ascii').strip():
            refs[item.parent.relative_to(ref_dir).as_posix()] = item.name
    return refs
0ad9f07c2885785a39f3918c43b0c795d4a864e7
704,205
def check_string(seq):
    """Checks if seq is a string"""
    if not isinstance(seq, str):
        assert False, "Input is not a string."
    else:
        pass
    return None
c56ce486fae2e1335b0b191b1804b19ab121f1c9
704,206
import json


def parse_json(json_file):
    """JSON poem parser for 'Gongocorpus'.

    We read the data and find elements like title, author, year, etc. Then we
    iterate over the poem text and we look for each stanza, line, word and
    syllable data.

    :param json_file: Path for the json file
    :return: Dict with the data obtained from the poem
    :rtype: dict
    """
    corpus_poem = json.loads(json_file.read_text())
    corpus_name = json_file.parts[-5]
    poem = {}
    title = corpus_poem["incipit"]
    author = corpus_poem["author"]
    year = corpus_poem["year"]
    authorship = corpus_poem["authorship"]
    manually_checked = False
    scanned_poem = corpus_poem["scanned_poem"]
    poem_text = corpus_poem["text"]
    stanza_list = []
    line_number = 0
    for stanza_number, stanza in enumerate(poem_text.split("\n\n")):
        stanza_text = "".join(stanza)
        line_list = []
        for line_text in stanza.split("\n"):
            scanned_line = scanned_poem[line_number]
            rythym_info = scanned_line["rhythm"]
            metrical_pattern = rythym_info["stress"]
            line_length = rythym_info["length"]
            word_list = []
            for token in scanned_line["tokens"]:
                if "word" in token:
                    word = token["word"]
                    stress_position = token["stress_position"]
                    syllables_text = [syl["syllable"] for syl in word]
                    word_text = "".join(syllables_text)
                    has_synalepha = [True for syl in word
                                     if "has_synalepha" in syl]
                    word_dict = {
                        "word_text": word_text,
                        "stress_position": stress_position,
                        "syllables": syllables_text
                    }
                    if True in has_synalepha:
                        word_dict.update({
                            "has_synalepha": True,
                        })
                    word_list.append(word_dict)
            line_list.append({
                "line_number": line_number + 1,
                "line_text": line_text,
                "metrical_pattern": metrical_pattern,
                "line_length": line_length,
                "words": word_list,
            })
            line_number += 1
        stanza_list.append({
            "stanza_number": stanza_number + 1,
            "stanza_type": "",
            "lines": line_list,
            "stanza_text": stanza_text,
        })
    poem.update({
        "poem_title": title,
        "author": author,
        "authorship": authorship,
        "year": year,
        "manually_checked": manually_checked,
        "stanzas": stanza_list,
        "corpus": corpus_name,
    })
    return poem
e1944bd5cf18e913c22a8d910aafee608a654e0e
704,207
def compile(element, compiler, **_kw):  # pylint: disable=function-redefined
    """
    Get the jsonb type of a value stored in a JSONB column
    """
    return "jsonb_typeof(%s)" % compiler.process(element.clauses)
917706c5aa05305c6d2930673d23194665b8c6ed
704,208
def aggregate(loss, weights=None, mode='mean'):
    """Aggregates an element- or item-wise loss to a scalar loss.

    Parameters
    ----------
    loss : Theano tensor
        The loss expression to aggregate.
    weights : Theano tensor, optional
        The weights for each element or item, must be broadcastable to
        the same shape as `loss` if given. If omitted, all elements will
        be weighted the same.
    mode : {'mean', 'sum', 'normalized_sum'}
        Whether to aggregate by averaging, by summing or by summing and
        dividing by the total weights (which requires `weights` to be given).

    Returns
    -------
    Theano scalar
        A scalar loss expression suitable for differentiation.

    Notes
    -----
    By supplying binary weights (i.e., only using values 0 and 1), this
    function can also be used for masking out particular entries in the
    loss expression. Note that masked entries still need to be valid values,
    not-a-numbers (NaNs) will propagate through.

    When applied to batch-wise loss expressions, setting `mode` to
    ``'normalized_sum'`` ensures that the loss per batch is of a similar
    magnitude, independent of associated weights. However, it means that
    a given datapoint contributes more to the loss when it shares a batch
    with low-weighted or masked datapoints than with high-weighted ones.
    """
    if weights is not None:
        loss = loss * weights
    if mode == 'mean':
        return loss.mean()
    elif mode == 'sum':
        return loss.sum()
    elif mode == 'normalized_sum':
        if weights is None:
            raise ValueError("require weights for mode='normalized_sum'")
        return loss.sum() / weights.sum()
    else:
        raise ValueError("mode must be 'mean', 'sum' or 'normalized_sum', "
                         "got %r" % mode)
6d888d1854cfa78e13fcd5eba412e224164386d7
704,209
def get_lo_hi_from_CI(s, exclude=None):
    """
    Parse the confidence interval from CI.

    >>> get_lo_hi_from_CI("20-20|40-60")
    (40, 60)
    """
    a, b = s.split("|")
    ai, aj = a.split("-")
    bi, bj = b.split("-")
    los = [int(ai), int(bi)]
    his = [int(aj), int(bj)]
    if exclude and exclude in los:
        los.remove(exclude)
    if exclude and exclude in his:
        his.remove(exclude)
    return max(los), max(his)
69c0fb14afd18444465cb7b0f8b23990d044a2b9
704,211
def get_control_changes(midi, use_drums=True):
    """Retrieves a list of control change events from a given MIDI song.

    Arguments:
        midi (PrettyMIDI): The MIDI song.
        use_drums (bool): Whether to include drum instruments.
    """
    midi_control_changes = []
    for num_instrument, midi_instrument in enumerate(midi.instruments):
        if not midi_instrument.is_drum or use_drums:
            for midi_control_change in midi_instrument.control_changes:
                midi_control_changes.append((
                    midi_instrument.program,
                    num_instrument,
                    midi_instrument.is_drum,
                    midi_control_change
                ))
    return midi_control_changes
c3c264c11f9ef38aa79c24cd795e35145139beb1
704,213
def is_workinprogress(change):
    """Return True if the patchset is WIP

    :param dict change: De-serialized dict of a gerrit change
    :return: True if one of the votes on the review sets it to WIP.
    """
    # This indicates WIP for older Gerrit versions
    if change['status'] != 'NEW':
        return True
    # Gerrit 2.8 WIP
    last_patch = change['patchSets'][-1]
    try:
        approvals = last_patch['approvals']
    except KeyError:
        # Means no one has voted on the latest patch set yet
        return False
    for a in approvals:
        if a['type'] == 'Workflow' and int(a['value']) < 0:
            return True
    return False
ac2f5ba1ab8d5fd432ef7b13c5b033e0c3710fd4
704,214
import resource


def get_total_cpu_time_and_memory_usage():
    """
    Gives the total cpu time of itself and all its children, and the maximum
    RSS memory usage of itself and its single largest child.
    """
    me = resource.getrusage(resource.RUSAGE_SELF)
    children = resource.getrusage(resource.RUSAGE_CHILDREN)
    total_cpu_time = me.ru_utime + me.ru_stime + children.ru_utime + children.ru_stime
    total_memory_usage = me.ru_maxrss + children.ru_maxrss
    return total_cpu_time, total_memory_usage
2073440a0ef6e9185b5b4c7613a56c902a722dc3
704,215
import pickle


def pickle_load(namefile: str):
    """Load Python variable, given name of file.

    :param namefile: A string of file to load.
    :return output: A loaded variable.
    """
    with open(namefile, 'rb') as load_file:
        output = pickle.load(load_file)
    return output
425e53b8daf69bf832abc45a4270cc01f383c50e
704,216
def scale_matrix(matrix):
    """nn works best with values between 0.01 and 1"""
    return matrix / 255 * 0.99 + 0.01
b4c0d34a21724ee5712caf8dca131b3e1e1d0753
704,217
def negate(condition):
    """
    Returns a CPP conditional that is the opposite of the conditional
    passed in.
    """
    if condition.startswith('!'):
        return condition[1:]
    return "!" + condition
5f31ed3ee2f16a53674f830402fdec890af25032
704,218