content: string (39 to 14.9k characters)
sha1: string (40 characters)
id: int64 (0 to 710k)
def height_to_imperial(height):
    """Converts height in cm to feet/inches."""
    height_inches = height / 2.54
    feet = int(height_inches) // 12
    inches = height_inches % 12
    return feet, inches
bdec165de4b1576e53f2c81e6d3fc9d60b82d8ee
19,707
def f(B, x):
    """A linear function for the ODR."""
    return B * x
ac23fd53c27d784ad826a2ac4330a51f71e13d96
19,711
import struct


def write_varint(data: int) -> bytes:
    """Given a non-negative integer, encode the integer into a varint.

    Args:
        data (int): The integer to encode into a varint.

    Returns:
        bytes: The encoded varint.
    """
    if data == 0:
        # Zero still needs one byte; the loop below would emit nothing.
        return b"\x00"
    packed_packets = list()
    while data != 0:
        current_byte = data & 0x7F
        data >>= 7
        # Set the continuation bit (0x80) if more bytes follow.
        compiled_bytes = struct.pack("B", current_byte | (0x80 if data > 0 else 0))
        packed_packets.append(compiled_bytes)
    return b"".join(packed_packets)
377235dcfb8737be3047d02912e44c806e1d61c4
19,721
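A quick sanity sketch for write_varint above (an editor-added check, not a dataset row; the expected bytes follow the protobuf varint convention, and the zero case relies on the fix applied above):

# Editor-added usage check for write_varint.
assert write_varint(1) == b"\x01"
assert write_varint(300) == b"\xac\x02"  # 300 -> low 7 bits 0x2C | 0x80, then 0x02
assert write_varint(0) == b"\x00"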
def load_ldap_settings(config):
    """ Load all the ldap configuration settings into a dict

    LDAP configuration settings contain an ldap_ prefix.

    Args:
        config (dict): the global config

    Returns:
        (dict) All the ldap_ settings
    """
    ldap_config = {}
    for key, value in config.items():
        if key.lower().startswith("ldap_"):
            ldap_config[key] = value
    return ldap_config
658c54bd26240c85829e1ff96fa9a7d2b6ee045c
19,725
def schema_url(base_url):
    """URL of the schema of the running application."""
    return f"{base_url}/swagger.yaml"
96926681bcbfdebc8da3a142571452c41fdfb785
19,726
import re


def filter_vowel_cons_ratio(word, ratio=0.5):
    """Return True if the ratio of vowels to consonants is > `ratio`.

    This can be used as an ad-hoc pronunciation filter.

    :param word (str): The word
    :param ratio (float, optional): The ratio
    :rtype: bool
    """
    vowels = re.compile(r'[aeiouy]')
    consonants = re.compile(r'[^aeyiuo]')
    vmatch = re.findall(vowels, word.lower())
    cmatch = re.findall(consonants, word.lower())
    _ratio = float(len(vmatch)) / float(len(cmatch))
    return _ratio > ratio
0bb87d6b6d40f83c9826eb0988888a9c1105db3b
19,728
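A brief check of filter_vowel_cons_ratio above (an editor-added sketch, not a dataset row); "audio" has four vowel matches against one consonant, so its ratio 4.0 clears the 0.5 default:

# Editor-added usage check for filter_vowel_cons_ratio.
assert filter_vowel_cons_ratio("audio")         # 4 vowels / 1 consonant = 4.0
assert not filter_vowel_cons_ratio("strength")  # 1 vowel / 7 consonants ~= 0.14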
def get_central_values(flat_input, input_size, center_size, palette_size):
    """
    Takes a flat array which is assumed to represent input_size by
    input_size by palette_size data, and returns a flat array that
    represents the center_size by center_size central values of the
    original array.
    """
    lc = input_size//2 - center_size//2
    rs = flat_input.reshape((input_size, input_size, palette_size))
    sel = rs[lc:lc+center_size, lc:lc+center_size, :]
    return sel.reshape([-1])
2b62988e4fd9dcee35fba949e29a3e8045b5f909
19,731
def normalizeCUAddr(addr):
    """
    Normalize a cuaddr string by lower()ing it if it's a mailto:,
    or removing trailing slash if it's a URL.

    @param addr: a cuaddr string to normalize
    @return: normalized string
    """
    lower = addr.lower()
    if lower.startswith("mailto:"):
        addr = lower
    if (
        addr.startswith("/") or
        addr.startswith("http:") or
        addr.startswith("https:")
    ):
        return addr.rstrip("/")
    else:
        return addr
4a3c3a994fc07c17e3e0cbea1eb6be4dab67632f
19,733
def stringify_var_names(var_list, delimiter=""):
    """
    Parameters
    ----------
    var_list : list[str]
        Each list element is the name of a variable.
    delimiter : str, optional
        String inserted between consecutive variable names.

    Returns
    -------
    result : str
        Concatenated, lower-cased variable names.
    """
    result = var_list[0]
    for var_name in var_list[1:]:
        result += delimiter + var_name
    return result.lower()
159c5e2b6081afa33d835a8e784e6be029c0496b
19,735
def summarize_results(df, level='sample_name'):
    """ Summarize quality control results

    Args:
        df (pd.DataFrame): loaded using :func:`madic.io.read_transition_report`
        level (str): Choices: 'sample_name' or 'rep'
            Whether to return summary on a sample or replicate basis

    Returns:
        pd.DataFrame: DataFrame containing final pass / fail quality control
            results by peptide and sample
    """
    eval_cols = [x for x in df.columns if x.startswith('pass_')]
    summarized = df.groupby([level, 'pep'])[eval_cols].agg('all').reset_index()

    if 'interference' in df.columns:
        ic = df.groupby(['sample_name', 'pep']).interference.agg('any').reset_index()
        ic.rename(columns={'interference': 'interference_corrected'}, inplace=True)
        summarized = summarized.merge(ic)

    return summarized
caba6b77098d91cf0285559cf3e0965763112428
19,745
import re


def _looks_like_url(txt):
    """
    Return True if text looks like an URL (probably relative).

    >>> _looks_like_url("foo.bar")
    False
    >>> _looks_like_url("http://example.com")
    True
    >>> _looks_like_url("/page2")
    True
    >>> _looks_like_url("index.html")
    True
    >>> _looks_like_url("foo?page=1")
    True
    >>> _looks_like_url("x='what?'")
    False
    >>> _looks_like_url("visit this page?")
    False
    >>> _looks_like_url("?")
    False
    """
    if " " in txt or "\n" in txt:
        return False
    if "/" in txt:
        return True
    if re.search(r'\?\w+=.+', txt):
        return True
    if re.match(r"\w+\.html", txt):
        return True
    return False
50fb8a40260b7d69bd535024f9ed2e348f88f5bf
19,752
def script_from_saved_model(saved_model_dir, output_file, input_arrays,
                            output_arrays):
    """Generates a script for saved model to convert from TF to TF Lite."""
    return u"""# --- Python code ---
import tensorflow as tf

lite = tf.compat.v1.lite

saved_model_dir = '{saved_model_dir}'
output_file = '{output_file}'
converter = lite.TFLiteConverter.from_saved_model(
    saved_model_dir,
    input_arrays={input_arrays},
    output_arrays={output_arrays})
tflite_model = converter.convert()
with tf.io.gfile.GFile(output_file, 'wb') as f:
  f.write(tflite_model)
print('Write file: %s' % output_file)
""".format(
        saved_model_dir=saved_model_dir,
        output_file=output_file,
        input_arrays=input_arrays,
        output_arrays=output_arrays)
b00592217f316cd8bad99127e4e92f14a0f5910c
19,755
from typing import Iterable
from typing import Any


def argsort_iterable(x: Iterable[Any]) -> Iterable[int]:
    """Iterable of indexes that sort an array.

    This is equivalent to numpy argsort but works on any sortable python
    iterable and returns a python iterable. The result is a generator, but
    note that the full sort runs as soon as the first index is requested.

    Args:
        x (Iterable[Any]): The array to be sorted; must be sortable

    Returns:
        Iterable[int]: The indexes that sort x
    """
    return (i for _, i in sorted((xi, i) for i, xi in enumerate(x)))
60ebfeecbd71e613d13f6c4e26e1cbf72bf85c75
19,758
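argsort_iterable above mirrors numpy.argsort; a small editor-added check (not a dataset row):

# Editor-added usage check for argsort_iterable.
assert list(argsort_iterable([3, 1, 2])) == [1, 2, 0]
assert list(argsort_iterable("cab")) == [1, 2, 0]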
import re


def check_email_valid(submission):
    """
    Check if submission is a valid email address
    """
    if re.match(r"[^@]+@[^@]+\.[^@]+", submission):
        return True
    else:
        return False
1528df90d59c4e0cedc8c030acec1cfcd6984e64
19,760
def _resolve_collections(collections):
    """
    Split a list of raw collections into a list of database/collection tuples

    :param list[str] collections:
    :rtype: list[(str, str|None)]
    """
    ret = []
    for raw_collection in collections:
        attr_chain = raw_collection.split('.', 1)
        database = attr_chain[0]
        if len(attr_chain) == 2:
            ret.append((database, attr_chain[1]))
        else:
            ret.append((database, None))
    return ret
dca56d8ea52317bf48ddcf0383b691eacbb34f95
19,761
def removeVowels(word):
    """
    Recursive function to remove all vowels in a word/sentence.

    Parameters:
        word (string): the word from which the vowels are to be removed.

    Returns:
        A string with no vowels after all recursions are complete.

    Raises:
        TypeError: If the input is not a string.
        Exception: If any unexpected error occurs, e.g. RuntimeError when
            there are too many recursive calls.
    """
    try:
        if not isinstance(word, str):  # Checking if input is a valid string.
            raise TypeError
        if len(word) == 0:  # Base case
            return word
        elif word[0] in "AEIOUaeiou":
            # Skip that letter and proceed with the rest of the word.
            return removeVowels(word[1:])
        else:
            # Keep the first letter and recurse on the remainder.
            return word[0] + removeVowels(word[1:])
    except TypeError:  # If the provided input is not a string.
        print("Error: Please provide a valid word/sentence of type string and try again.")
    except Exception:  # If any other unexpected error occurs.
        print("Error in removing vowels. Please try again.")
3e4d679160b911937df0fb3e9f94d913178330bc
19,763
import math


def rotate(x, y, angle):
    """Transform coordinate (x, y) by angle.

    angle is in radians, not in degrees.
    """
    new_x = x * math.cos(angle) - y * math.sin(angle)
    new_y = x * math.sin(angle) + y * math.cos(angle)
    return new_x, new_y
7a9a9d25ac1197d272ad969be57c932a6adbe35d
19,766
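A short editor-added check for rotate above (not a dataset row): rotating the unit x-vector by pi/2 should land on the y-axis, up to floating-point error:

import math

# Editor-added usage check for rotate.
x, y = rotate(1.0, 0.0, math.pi / 2)
assert abs(x) < 1e-12 and abs(y - 1.0) < 1e-12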
def XYZ_to_xy(XYZ):
    """Convert XYZ to xy

    Args:
        XYZ ([float, float, float]): X, Y, Z input values

    Returns:
        [float, float]: x, y chromaticity coordinates
    """
    X, Y, Z = XYZ
    divider = (X + Y + Z)
    x = X / divider
    y = Y / divider
    return [x, y]
cc41bd7dda4339c813619171d2a885c420a276a5
19,770
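An editor-added check for XYZ_to_xy above (not a dataset row), using the standard D65 white point, whose chromaticity is approximately (0.3127, 0.3290):

# Editor-added usage check for XYZ_to_xy.
x, y = XYZ_to_xy([0.95047, 1.0, 1.08883])  # D65 white point
assert abs(x - 0.3127) < 1e-3 and abs(y - 0.3290) < 1e-3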
def get_ints(min, max):
    """Return range based iterator with given min and max"""
    return range(min, max)
0791a4378b89cf96187b80255835c41ef32a41c5
19,776
def strip_prefix(name, strip_text):
    """
    Strip the first section of an underscore-separated ``name``.

    If the first section matches the ``strip_text``, we'll remove it.
    Otherwise, the object will remain unchanged.

    Parameters
    ----------
    name: ``str``
        underscore_separated_name to strip from

    strip_text: ``str``
        Text to strip from the name, if it matches the first segment

    Returns
    -------
    stripped: ``str``
        The ``name``, modified or unmodified.
    """
    if name.startswith(strip_text):
        # The +1 also drops the underscore that separated the prefix.
        return name[len(strip_text)+1:]
    else:
        return name
0efe643933a0006617bc0ed9416a6ef931de34e1
19,781
def scriptExit(*args, **kwargs):
    """ Exit Nuke. """
    return None
956efc2b3c2c6585974043d0ad8d36f80612a24c
19,785
def strip_unnamed(string: str) -> str:
    """When fusing headers coming from excel, the lines with NA values are
    named as `Unnamed:...`. This function filters them out.
    """
    if string.strip().startswith("Unnamed:"):
        return ""
    else:
        return string.strip()
cf4167b23a96c5248491a13d149a0e10b6a7714a
19,790
import logging
import pathlib


def _entry_file_is_valid(key, entry):
    """Check the validity of a `file` field of an entry.

    Ensures that
    1. the entry has a `file` field,
    2. the `file` field is nonempty,
    3. the file pointed to exists, and
    4. the file pointed to is a file, not a directory.

    Returns
    -------
    bool:
        True if the file is valid by the above definitions. False otherwise.
    """
    if 'file' not in entry.keys():
        logging.warning(f'No file in entry with key `{key}`. Skipping.')
        return False

    if entry['file'] == '':
        logging.warning(f'File field in entry with key `{key}` is '
                        'empty. Skipping.')
        return False

    if not pathlib.Path(entry['file']).exists():
        logging.warning(f"File `{entry['file']}` in entry with key "
                        f"`{key}` does not exist. Skipping.")
        return False

    if not pathlib.Path(entry['file']).is_file():
        logging.warning(f"File `{entry['file']}` in entry with key "
                        f"`{key}` is not a file. Skipping.")
        return False

    return True
815ef3d2ce55028634a2bad0d3b1c9444ce2b752
19,799
def squareMean3(array, centerPixel):
    """
    Kernel neighborhood function for focal map algebra. Returns mean of
    a 3x3 square array.

    @param array - array from which to retrieve the neighborhood kernel
    @param centerPixel - (i,j) coordinates of center pixel of kernel in the array
    @return - mean of 3x3 square neighborhood around centerPixel
    """
    rows = centerPixel[0] - 1
    cols = centerPixel[1] - 1
    neighArray = array[rows:rows + 3, cols:cols + 3]
    return neighArray.mean()
e017f38483fdc72163adcd65ef9ef4900fa8b350
19,802
import random


def generate_int_list(list=None, length=10, min_value=0, max_value=10):
    """Generate and return a list of random integers.

    The random values will be between min_value and max_value - 1. If list's
    length were less than length, it'll be completed with random values until
    list's length were equal to length.
    """
    # A mutable default argument ([]) would be shared across calls, so
    # default to None and create a fresh list instead.
    if list is None:
        list = []
    # Using a variable to check list length, avoids calling
    # the same function (len()) more than once.
    length_list = len(list)
    while length_list < length:
        list.append(random.randrange(min_value, max_value))
        length_list += 1
    return list
f80ce3edd5cd1ca4189d1cda9c3d151ff4943d50
19,803
def __fix_context(context):
    """Return a new context dict based on original context.

    The new context will be a copy of the original, and some mutable members
    (such as script and css files) will also be copied to prevent polluting
    shared context.
    """
    COPY_LISTS = ('script_files', 'css_files',)
    for attr in COPY_LISTS:
        if attr in context:
            context[attr] = context[attr][:]
    return context
ebaaa2b7afccf47a2d4e9eb40b481be46565b95e
19,804
import torch


def residuals(target, predictions: list, total=True):
    """
    Calculates the mean of the prediction error

    Parameters
    ----------
    target : torch.Tensor
        The true values of the target variable
    predictions : list (torch.Tensor)
        The predicted expected values of the target variable
    total : bool, default = True
        - When total is set to True, return the overall mean of the error
        - When total is set to False, return the mean of the error along the horizon

    Returns
    -------
    torch.Tensor
        The mean of the error, which depending on the value of 'total' is
        either a scalar (overall mean) or 1d-array over the horizon, in which
        case it is expected to increase as we move along the horizon.
        Generally, lower is better.

    Raises
    ------
    ValueError
        When the dimensions of the predictions and targets are not compatible
    """
    if predictions[0].shape != target.shape:
        raise ValueError('dimensions of predictions and targets need to be compatible')

    error = target - predictions[0]
    if total:
        return torch.mean(error)
    else:
        return torch.mean(error, dim=0)
98898cced03d3c0d6c1ea53af25b8987ade01ea7
19,808
import codecs


def _reg_file_to_str(file_path):
    """Open file at given path and return as a string."""
    with codecs.open(file_path, 'r', 'utf-16-le') as f:
        file_str = f.read()
    return file_str
657c50e5f6b410eb13c6e651054c98ac9db6da45
19,811
from pathlib import Path


def guard_name_project_plus_name(filename: str, project_name: str):
    """
    Create a header guard name composed of a project name and the filename.

    Parameters
    ----------
    filename : str
        The name of the file containing the header guard
    project_name : str
        The name of the C++ project to add to the header guard

    Returns
    -------
    str
        The header guard
    """
    path = Path(filename)
    if project_name:
        return f"{project_name}_{path.stem}_H".upper()
    return f"{path.stem}_H".upper()
53d310c8858934bd2453a62cf83aa1d4439be9c1
19,820
def options(occ=None, debyewaller=None, scattering_factor=None,
            moment=None, incident_polarisation_vector=(1, 0, 0), magnetic_formfactor=None,
            energy_kev=8, polarisation='sp', azi_ref_q=(1, 0, 0), psi=0, f0=0, f1=1, f2=0):
    """
    Create an input dict that will work with all structure factor (sf_) functions

    :param occ: [m,1] array of atomic occupancies
    :param debyewaller: [n,m] array of thermal factors for each atom and reflection
    :param scattering_factor: array [n,m] or [n]: radiation dependent scattering factor/form factor/scattering length
    :param moment: [m,3] array of magnetic moment direction in orthogonal basis
    :param incident_polarisation_vector: [1,3] direction of incident polarisation
    :param magnetic_formfactor: [n,m] array of magnetic form factors for each atom and reflection
    :param energy_kev: float value of incident x-ray energy in keV
    :param azi_ref_q: [1,3] azimuthal reference, in cartesian basis (Q)
    :param psi: float value of the azimuthal angle - the rotation out of the scattering plane.
    :param polarisation: str definition of the polarisation can be: ['ss','sp','ps','pp'] with 's'=sigma, 'p'=pi
    :param f0: float Flm value 0 (charge)
    :param f1: float Flm value 1
    :param f2: float Flm value 2
    :return: dict
    """
    return locals()
0890c8a7680130d93e8de3093b51ac9bea5d7b52
19,824
def find_chromosome(l):
    """ Get name line and return chromosome """
    return l.strip().split(" ")[1]
2a7dd92808699edb30a4fc1400bd887161cd14e5
19,827
def camel_case(string, delimiters=[" "]):
    """Converts a string into camel case

    Parameters
    ----------
    string: str, the string to convert
    delimiters: list[str], the characters that denote separate words
    """
    for delimiter in delimiters:
        str_list = [s[0].upper() + s[1:] for s in string.split(delimiter)]
        string = "".join(str_list)
    return string
45d6ae68f19c634093934209d6599d6e6b0d6347
19,835
def get_metric(line, name, split):
    """
    Get metric value from output line

    :param line: console output line
    :param name: name of metric
    :param split: split character
    :return: metric value
    """
    start = line.find(name) + len(name) + 1
    end = line.find(split, start)
    return line[start:end]
0a17f69983ed125b67ac513e3b7f658d5d1c35f5
19,838
def IsSwitch(op):
    """Return true if `op` is a Switch."""
    return op.type == "Switch" or op.type == "RefSwitch"
03d8c4f458ee1316f3f1fe25f956cf1bf8b71af3
19,845
def path_cost(path):
    """The total cost of a path (which is stored in a tuple with the final action)."""
    # path = (state, (action, total_cost), state, ... )
    if len(path) < 3:
        return 0
    else:
        action, total_cost = path[-2]
        return total_cost
7f0fa8a3a35809977bc2aa75164b78e354126812
19,861
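An editor-added sketch for path_cost above (not a dataset row), using a hypothetical path in the documented (state, (action, total_cost), state, ...) layout; 'A', 'move', 'B' are made-up names:

# Editor-added usage check for path_cost.
assert path_cost(('A', ('move', 5), 'B')) == 5
assert path_cost(('A',)) == 0  # too short to carry a cost yet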
import re


def count_items_matches(text, item, regexp):
    """
    Counts the number of occurrences of the item in the text
    """
    expression = r'(^|%s)%s($|%s)' % (regexp, re.escape(item), regexp)
    pattern = re.compile(expression, flags=re.U)
    matches = re.findall(pattern, text)
    return len(matches)
37428503a3f10f6a6e2f5b6d89b0ba4183b33bb0
19,869
def get_all_refs(schema):
    """Get all ref links in a schema.

    Traverses a schema and extracts all relative ref links from the schema,
    returning a set containing the results.

    Parameters:
        schema: An OAS schema in the form of nested dicts to be traversed.

    Returns:
        set: All of the ref links found during traversal.
    """
    all_refs = set()
    if type(schema) is dict:
        for key, val in schema.items():
            if key == "$ref" and type(val) is str:
                all_refs.add(val)
            all_refs.update(get_all_refs(val))
    elif type(schema) is list:
        for item in schema:
            all_refs.update(get_all_refs(item))
    return all_refs
6b0c57d080f44f823cfaf8614da5b51abb9c86a4
19,875
def is_sorted(l):
    """ test whether a list is sorted """
    return l == sorted(l)
1cd90853067708c7e133e5d619917d172b21e041
19,881
def get_index(substring, kmer_list):
    """Return a dict mapping each string in the list to the index of the
    substring within it (-1 if absent)."""
    return {k: k.find(substring) for k in kmer_list}
a1f31e3e01dde95c7e6b826fb9ca4893aab64ccf
19,884
def cli(ctx, role_id):
    """Display information on a single role

    Output:

        Details of the given role.
        For example::

            {"description": "Private Role for Foo",
             "id": "f2db41e1fa331b3e",
             "model_class": "Role",
             "name": "Foo",
             "type": "private",
             "url": "/api/roles/f2db41e1fa331b3e"}
    """
    return ctx.gi.roles.show_role(role_id)
72e0b8dc4d06d736e67bf1c4b8f70bd030c160b3
19,886
def parse_time_hms(s):
    """Convert HH:MM:SS to seconds."""
    parts = [float(x) for x in s.split(":")]
    sec = 0
    for i, part in enumerate(reversed(parts)):
        sec += part * 60**i
    return sec
3424545387e4a4c20726e001dab4fd9e39686e6d
19,889
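parse_time_hms above also handles MM:SS and bare seconds, since it folds digits from the right; an editor-added check (not a dataset row):

# Editor-added usage check for parse_time_hms.
assert parse_time_hms("01:02:03") == 3723.0  # 1*3600 + 2*60 + 3
assert parse_time_hms("02:30") == 150.0
assert parse_time_hms("45") == 45.0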
from collections.abc import Mapping


def combine_celltypes(df, cols_to_combine=None):
    """
    Function to sum related cell types into a single column

    Inputs:
        - df: pandas dataframe. Output of td.tumor_deconvolve()
        - cols_to_combine: dictionary. Keys are the desired names of any new
            cell type columns, values are an array of current column names to
            combine under the key name
            - Default = dictionary for combining common cell types from LM22

    Outputs:
        - Pandas dataframe with columns combined as specified by the
          dictionary. All unmentioned column names are left as they are
    """
    if cols_to_combine is not None:
        if isinstance(cols_to_combine, Mapping):
            pass
        else:
            raise TypeError("cols_to_combine must be a dictionary")
    else:
        # Use LM22 as default
        print("WARNING: No dictionary defined for combining columns... "
              "Attempting to use default dict for LM22 signatures")
        cols_to_combine = {
            'B cells': ['B cells naive', 'B cells memory'],
            'CD4 T cells': ['T cells CD4 naive', 'T cells CD4 memory resting',
                            'T cells CD4 memory activated', 'T cells follicular helper',
                            'T cells regulatory (Tregs)'],
            'CD8 T cells': ['T cells CD8'],
            'NK cells': ['NK cells resting', 'NK cells activated'],
            'Macrophages': ['Macrophages M0', 'Macrophages M1', 'Macrophages M2'],
            'Mast cells': ['Mast cells resting', 'Mast cells activated'],
            'Dendritic cells': ['Dendritic cells resting', 'Dendritic cells activated']
        }

    df2 = df.copy()
    for cell_type in cols_to_combine.keys():
        try:
            df2[cell_type] = df2[cols_to_combine[cell_type]].sum(axis=1)
            df2.drop(cols_to_combine[cell_type], axis=1, inplace=True)
        except KeyError as e:
            print("WARNING: Failed to combine some columns: ")
            print("KeyError: " + str(e))

    return df2
1215e49ad263fbae33a9eb2d9971b50c92cb4438
19,890
from typing import Tuple
from typing import Optional


def split_domain_port(netloc: str) -> Tuple[str, Optional[str]]:
    """
    Splits the netloc into domain and port.

        >>> split_domain_port("example.com")
        ('example.com', None)
        >>> split_domain_port("example.com:80")
        ('example.com', '80')
    """
    segments = netloc.split(":")
    if len(segments) > 1:
        return ":".join(segments[:-1]), segments[-1]
    return netloc, None
ce6724b0b05d1b409976b67527ccd5c50fbcbe01
19,891
def to_unicode(string, encoding='utf-8'):
    """Convert byte string to unicode."""
    return str(string, encoding) if isinstance(string, bytes) else string
1372ddc1882ddfee28f4efb52695577404d827b2
19,897
def img_denormalize(img, img_mean, img_std, img_mode='rgb'):
    """
    De-normalize the image array by multiplying by `img_std` and adding
    `img_mean` in the right image mode; the mean and std should correspond
    to `img_mode`.

    Args:
        img (ndarray): Image array to be de-normalized.
        img_mean (tuple[float]): mean value for each channel of image.
        img_std (tuple[float]): std value for each channel of image.
        img_mode (str): `rgb` or `bgr`, to specify the img mode.

    Returns:
        denormed_img (ndarray): de-normalized image array.
    """
    assert img_mode in ['rgb', 'bgr'], "image mode must be 'rgb' or 'bgr'."
    return img * img_std + img_mean
1aa59c7956051e41b0c8621012b7e135e48165a2
19,901
def valid_string_int(text, minimum=None, maximum=None):
    """Validate string representative of integer.

    Returns True if it is a valid integer and between the minimum and
    maximum value.
    """
    if not text.isdigit():
        return False
    value = int(text)
    # A one-sided bound must reject values outside it; the original version
    # fell through to True when only one bound was given and violated.
    if minimum is not None and value < minimum:
        return False
    if maximum is not None and value > maximum:
        return False
    return True
72906fb85a55786504baecd059499fb46bb1b261
19,902
def bigend_2_int(p_bytes):
    """ Convert big-endian bytestring to int """
    l_ix = 0
    l_int = 0
    while l_ix < len(p_bytes):
        l_b = int(p_bytes[l_ix])
        l_int = l_int * 256 + l_b
        l_ix += 1
    return l_int
5c51a3752eec30804ab45185fb51c70be240a5b6
19,906
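bigend_2_int above agrees with the builtin int.from_bytes(..., "big"); an editor-added check (not a dataset row):

# Editor-added usage check for bigend_2_int.
assert bigend_2_int(b"\x01\x00") == 256
assert bigend_2_int(b"\x12\x34\x56") == int.from_bytes(b"\x12\x34\x56", "big")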
import torch


def loss_KL(p, q):
    """
    Kullback-Leibler divergence between two torch Tensors

    :param p: 1st tensor
    :param q: 2nd tensor
    :return: Kullback-Leibler divergence of p and q
    """
    return torch.sum(p*torch.log(p/q)-p+q)
0e3d8ff6fa84f569c9c3f4532e33a179645e2252
19,907
def check_any_str(list_to_check, input_string):
    """Check if any items in a list have a string in them.

    Parameters
    ----------
    list_to_check : list[str]
        A list of strings.
    input_string : str
        A string to check items in the list.

    Returns
    -------
    Boolean
        True or False, depending on whether input_string is found in at
        least one list item.
    """
    return any(input_string in string for string in list_to_check)
d4c470281de2e2060ecdc575ed6935ef92d8886b
19,912
def parseMoClassName(className):
    """ Given a class name (aaaUserEp) returns tuple aaa,UserEp"""
    idx = -1
    upperFound = False
    for c in className:
        idx += 1
        if c.isupper():
            upperFound = True
            break
    if upperFound:
        pkg = className[:idx]
        klass = className[idx:]
    else:
        pkg = className
        klass = ""
    return pkg, klass
c611f9e19b64674fcbd62944ca7e1d9a8cfd62c3
19,913
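parseMoClassName above splits at the first upper-case letter; an editor-added check (not a dataset row; fvTenant is a made-up example class name):

# Editor-added usage check for parseMoClassName.
assert parseMoClassName("aaaUserEp") == ("aaa", "UserEp")
assert parseMoClassName("fvTenant") == ("fv", "Tenant")
assert parseMoClassName("plain") == ("plain", "")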
import json


def dump_json(filename, data, **kwargs):
    """Serialize data as JSON-file

    Parameters
    ----------
    filename : str
    data : list, dict, etc.
        Data to save. Check json.dump for more details and description.
    """
    with open(filename, 'w') as f:
        json.dump(data, f, **kwargs)
    return None
5a2f94021e640378e7924d3476238c3db67ed13b
19,915
def get_entry(root_node: dict, entry_name: str, default_value):
    """Gets the entry from the root node or creates one with the default
    value if none is existing in the root node

    Arguments:
        root_node {dict} -- Root node
        entry_name {str} -- Entry name
        default_value {[type]} -- Default value

    Returns:
        [type] -- Entry
    """
    node = default_value
    try:
        node = root_node[entry_name]
    except KeyError:
        # Missing entry: fall back to the default value.
        pass
    return node
57c7ddb0dc546677ccf58f731dbc19e42e925dbb
19,920
import re


def replace_aea_add_statements(
    content: str, old_string: str, new_string: str, type_: str
) -> str:
    """Replace statements of the type: 'aea add <type> <old_string>'."""
    if type_ != "agents":
        content = re.sub(
            fr"aea +add +{type_} +{old_string}",
            f"aea add {type_} {new_string}",
            content,
        )
    return content
b0670f2ffc759266c2609be21efda86ca83a3ea1
19,921
import csv


def gete2wlandw2el(datafile):
    """Get dict inputs for the EM algorithm.

    Parameters
    ----------
    datafile: str
        path to datafile. The data file itself must be a csv file with 3
        columns: question, worker, answer and the first row is the column
        names.

    Returns
    -------
    dict
        Indexed by question
        Each value is a list of workers and labels assigned by them. For
        example, e2wl['1'][3] -> ['4', '3'] means that question '1' was
        answered as '3' by worker '4'
    dict
        Indexed by worker name
        Each value is a list of questions and labels assigned by the worker.
        For example, w2el['4'][0] -> ['1', '3'] means that worker '4', when
        asked question '1', assigned the label '3'.
    list
        list of unique labels in the dataset
    """
    e2wl = {}
    w2el = {}
    label_set = []

    # Use a context manager so the file is always closed.
    with open(datafile, 'r') as f:
        reader = csv.reader(f)
        next(reader)  # skip the header row
        for line in reader:
            example, worker, label = line
            if example not in e2wl:
                e2wl[example] = []
            e2wl[example].append([worker, label])

            if worker not in w2el:
                w2el[worker] = []
            w2el[worker].append([example, label])

            if label not in label_set:
                label_set.append(label)

    return e2wl, w2el, label_set
11af97514625b6750a14540bf9790c904d6cc141
19,922
def extract_info_attributes(turn):
    """Extract information attributes for current round using NLU annotations.

    Args:
        turn: Current round information

    Returns:
        get_attribute_matches: Information attributes
    """
    user_annotation = eval(turn["transcript_annotated"])
    assistant_annotation = eval(turn["transcript_annotated"])
    annotation = user_annotation + assistant_annotation
    # annotation = user_annotation
    all_intents = [ii["intent"] for ii in annotation]
    get_attribute_matches = []

    for index, intent in enumerate(all_intents):
        if any(
            ii in intent for ii in ("DA:ASK:GET", "DA:ASK:CHECK", "DA:INFORM:GET")
        ):
            # If there is no attribute added, default to info.
            if "." not in intent:
                get_attribute_matches.append("info")
                continue

            attribute = intent.split(".")[-1]
            if attribute == "info":
                new_matches = [
                    ii["id"].split(".")[-1]
                    for ii in annotation[index]["slots"]
                    if "INFO" in ii["id"]
                ]
                if len(new_matches):
                    get_attribute_matches.extend(new_matches)
                else:
                    get_attribute_matches.append("info")
            elif attribute != "":
                get_attribute_matches.append(attribute)

    return sorted(set(get_attribute_matches))
b995fbf439a7a3a7886e288319c680e45bba11cb
19,923
def unIndex(i, n, trunc):
    """
    Converts a 1-dimensional index ``i`` with truncation ``trunc`` and
    number of modes ``n`` to a n-ary index.
    """
    return [i // trunc**(n - 1 - m) % trunc for m in range(n)]
ef4d72e264de6225ea20eafc6d15f2f9721e2a9c
19,925
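unIndex above is a base-`trunc` digit expansion; with trunc=3 and n=2, the flat index 5 is 12 in base 3. An editor-added check (not a dataset row):

# Editor-added usage check for unIndex.
assert unIndex(5, 2, 3) == [1, 2]
assert unIndex(0, 3, 4) == [0, 0, 0]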
def to_list(results):
    """
    Purpose: Simplify the ComputeSkipGrams result set

    :param results: a ComputeSkipGrams result set, which looks like this
        [(u'Problems', u'installing'), (u'Problems', u'adobe'),
         (u'Problems', u'acrobat'), ... ]
    :return: a list of results, which looks like this
        ["Problems installing", "Problems adobe", "Problems acrobat", ... ]
    """
    the_list = []
    for result in list(results):
        the_list.append(" ".join(list(result)))
    return the_list
8c6e245a61303fbbc035f1d2f4b36d6e17a66970
19,928
from typing import List


def binary_search(arr: List[int], key: int) -> int:
    """Returns the index of a given key in a sorted array, or -1 if key not found.

    1. Get left and right indices.
    2. Calculate the mid.
    3. Depending on whether the key is bigger or smaller than mid,
       update left or right.

    Space: O(1)
    Time: O(log n)

    :param arr: The sorted array to search
    :param key: The key to search for
    :return: The index of the key if found, -1 otherwise
    """
    left = 0
    right = len(arr) - 1
    while left <= right:
        mid = left + (right - left) // 2
        if key > arr[mid]:
            left = mid + 1
        elif key < arr[mid]:
            right = mid - 1
        else:
            return mid

    # Key not found
    return -1
ba3e268890eb0dbab8033df56bcf6941df08819c
19,936
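An editor-added check for binary_search above (not a dataset row):

# Editor-added usage check for binary_search.
assert binary_search([1, 3, 5, 7, 9], 7) == 3
assert binary_search([1, 3, 5, 7, 9], 4) == -1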
def convert_image_name(cur, imgNum):
    """Converts imgNum to image file name without extension, as used in
    adjacency2 table

    Parameters:
    -----------
    cur : MySQLdb cursor
    imgNum : str
        IMG_Number field entry for table image
    """
    sql = ("select IMG_File "
           "from image "
           "where IMG_Number = '%s' " % (imgNum))
    cur.execute(sql)
    return cur.fetchone()[0].split('.')[0]
ca1087422415115bd703859ad330673f9b893b42
19,938
from typing import Tuple
from typing import Type
from typing import Any


def get_types_filter(desired_types: Tuple[Type[Any], ...]):
    """Returns a value filter that only keeps values with the given types."""
    return lambda arg_value: arg_value.type in desired_types
a245199b6d7bfa7203828b45d32cd62e1900e811
19,947
def _tf(word_occured_in_doc: int, total_words_in_doc: int) -> float:
    """Term frequency of a word in certain document.

    See: https://bit.ly/3zEDkMn
    """
    assert word_occured_in_doc <= total_words_in_doc
    return word_occured_in_doc / total_words_in_doc
32bd03d0b068ad229b7d9871bf3665643d35021e
19,948
def pcmatrixrow(matrix, form, trans):
    """
    Returns the row for the specified form and transposition level.

    Parameters
    ----------
    matrix : tuple
        A pitch class matrix created by pcmatrix().
    form : 'p' | 'r' | 'i' | 'ri'
        The row form to return: 'p' is prime, 'r' is retrograde,
        'i' is inversion, and 'ri' is retrograde-inversion.
    trans : pc
        The pitch class transposition level of the row to return.

    Returns
    -------
    A tuple containing the pitch classes for the given form and transposition.
    """
    size = len(matrix)
    assert 0 <= trans < size, "Not a valid transposition level: {}.".format(trans)
    row = col = 0
    if form in ['p', 'r']:
        while row < size:
            if matrix[row][col] == trans:
                break
            row += 1
        assert row < size, "Not a valid row transposition: {}.".format(row)
        return matrix[row] if form == 'p' else matrix[row][::-1]
    elif form in ['i', 'ri']:
        while col < size:
            if matrix[row][col] == trans:
                break
            col += 1
        assert col < size, "Not a valid row transposition: {}.".format(col)
        rng = range(0, size) if form == 'i' else reversed(range(0, size))
        return tuple(matrix[r][col] for r in rng)
    else:
        raise Exception("Not a valid row form: {}".format(form))
76a149cefcb9c3420cfb0c569c17687b4c49cba1
19,952
def _return_kwargs(dummy_req, **kwargs):
    """A dummy api call that simply returns its keyword arguments."""
    return kwargs
fa43ffcd84ee2016f24441306a2b92c37e6484c8
19,953
def get_clk_name(clk):
    """Return the appropriate clk name
    """
    if clk == 'main':
        return 'clk_i'
    else:
        return "clk_{}_i".format(clk)
1204f7db18b74ede3afa72ed2aa434f9b2dac984
19,960
def remove_xenon(te_img, xe_img, te_iso, xe_iso, clamp_neg=True):
    """
    Based on the abundances of the Xe and Te isotopes in question, and the
    Xe images, calculates the expected component of the Te image that
    actually comes from Xe and subtracts it.

    By default the function assumes any pixels that become negative after
    subtraction contain no tellurium and so clamps them to 0.
    """
    # Percentage abundance of different Xe isotopes
    xe_abundances = {'124': 0.095,
                     '126': 0.089,
                     '128': 1.910,
                     '129': 26.401,
                     '130': 4.071,
                     '131': 21.232,
                     '132': 26.909,
                     '134': 10.436,
                     '136': 8.857}

    # Check that the isotope requested is xenon contaminated.
    # Return the input array if not, and print a warning.
    if str(te_iso) not in xe_abundances:
        print('{0} is not contaminated with Xe. Input image returned.'.format(str(te_iso)))
        return te_img

    ratio = xe_abundances[str(te_iso)] / xe_abundances[str(xe_iso)]
    scaled_xe = xe_img * ratio
    subtracted = te_img - scaled_xe

    # Clamp negative pixels to zero if clamp_neg
    if clamp_neg:
        subtracted[subtracted < 0] = 0

    return subtracted
25c1f3d8d8f600290c1d41b520458dc6a00fc54a
19,965
def remove_utc(time_string):
    """
    Removes UTC from end of time string.

    The purpose is to clean up time strings so they can be converted to
    Numpy datetime64 objects, since datetime64 cannot be initialized if
    the input string includes the UTC marker.

    Args:
        - time_string (string): timestamp string from Purple Air dataset.

    Returns:
        - (string): the cleaned-up timestamp string (without "UTC")
    """
    if time_string.endswith("UTC"):
        return time_string[:-3].strip()
    else:
        return time_string
f1f4a1cb6f8a1068a48610b4a821ef1eac3c7fb8
19,967
def get_max_length(graphs):
    """
    Get the max length among sequences.
    """
    max_length = 0
    for cascade_id in graphs:
        # traverse the graphs for max length sequence
        for sequence in graphs[cascade_id]:
            max_length = max(max_length, len(sequence[0]))
    return max_length
2dfa82526a291c08bfef0c8e5edb06b693caa224
19,970
def quantum_state_preparation_qiskit(circuit, parameters, n=2):
    """
    Parameters
    ----------
    circuit : qiskit circuit
    parameters : n*2 array
        each row contains rx and ry parameters

    Returns
    -------
    qiskit circuit
    """
    q = circuit.qregs
    for i in range(len(q)):
        circuit.rx(parameters[i][0], q[i])
        circuit.ry(parameters[i][1], q[i])
    circuit.cx(q[1], q[0])
    return circuit
5443d7a4c29c6ab003f71e345d5a07925f538c21
19,973
def string_to_dict(string):
    """Return dictionary from string "key1=value1, key2=value2"."""
    if string:
        pairs = [s.strip() for s in string.split(",")]
        return dict(pair.split("=") for pair in pairs)
    # An empty input yields an empty dict rather than None.
    return {}
bf9de94d8bd54a2f65fc44ebbf8388f8fda58999
19,979
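An editor-added check for string_to_dict above (not a dataset row; the empty-input case relies on the fix applied above):

# Editor-added usage check for string_to_dict; values stay strings.
assert string_to_dict("a=1, b=2") == {"a": "1", "b": "2"}
assert string_to_dict("") == {}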
def firstUniqChar_v3(s: str) -> int:
    """Slowest approach. Worst case complexity is O(n^2).

    Not much better than v2.
    """
    for k, c in enumerate(s):
        if s.find(c) == s.rfind(c):
            return k
    return -1
432012c1aa9681c320f26fc565925b379a13ffdc
19,981
def get_expr_for_end_pos(field_prefix="v.", pos_field="start", ref_field="ref"):
    """Compute the end position based on start position and ref allele length"""
    return "%(field_prefix)s%(pos_field)s + %(field_prefix)s%(ref_field)s.length - 1" % locals()
ecdb1a03d37105c92cca28d4d4ee5e3a9bdd542a
19,986
def cell_value(cell):
    """Returns cell value or an empty string."""
    value = getattr(cell, 'value', '')
    if value is None or str(value).startswith('='):  # we don't calculate expressions
        return ''
    else:
        return str(value)
8bce8818b0928042eab6b313ed4cd36e97ca90f5
19,987
def partition_app_list(app_list, n):
    """
    :param app_list: A list of apps with models.
    :param n: Number of buckets to divide into.
    :return: Partition apps into n partitions, where the number of models
        in each list is roughly equal. We also factor in the app heading.
    """
    num_rows = sum([1 + len(x['models']) for x in app_list])  # + 1 for app title
    num_rows_per_partition = num_rows / n

    result = [[] for i in range(n)]  # start with n empty lists of lists
    partition = 0
    count = 0

    for a in app_list:
        # will the app fit in this column or overflow?
        c = len(a['models']) + 1  # the +1 is for the app title

        # if we're not on the last partition, and the models list fits
        # more on the next partition than this one, start the next partition.
        if (partition < n - 1) and (count + c/2.0 > num_rows_per_partition):
            partition += 1
            count = 0
        result[partition].append(a)
        count += c

    return result
8d29daab146c4888b831bb09b2932e056a743d66
19,990
def reward_function(params):
    """
    Example of rewarding the agent to stay inside two borders
    and penalizing getting too close to the objects in front
    """
    all_wheels_on_track = params["all_wheels_on_track"]
    distance_from_center = params["distance_from_center"]
    track_width = params["track_width"]
    objects_distance = params["objects_distance"]
    _, next_object_index = params["closest_objects"]
    objects_left_of_center = params["objects_left_of_center"]
    is_left_of_center = params["is_left_of_center"]

    # Initialize reward with a small number but not zero
    # because zero means off-track or crashed
    reward = 1e-3

    # Reward if the agent stays inside the two borders of the track
    if all_wheels_on_track and (0.5 * track_width - distance_from_center) >= 0.05:
        reward_lane = 1.0
    else:
        reward_lane = 1e-3

    # Penalize if the agent is too close to the next object
    reward_avoid = 1.0

    # Distance to the next object
    distance_closest_object = objects_distance[next_object_index]
    # Decide if the agent and the next object are on the same lane
    is_same_lane = objects_left_of_center[next_object_index] == is_left_of_center

    if is_same_lane:
        if 0.5 <= distance_closest_object < 0.8:
            reward_avoid *= 0.5
        elif 0.3 <= distance_closest_object < 0.5:
            reward_avoid *= 0.2
        elif distance_closest_object < 0.3:
            reward_avoid = 1e-3  # Likely crashed

    # Calculate reward by putting different weights on
    # the two aspects above
    reward += 1.0 * reward_lane + 4.0 * reward_avoid

    return reward
a9f1490f999abfe819df300e522e74820edb821e
19,991
def _parse_overscan_shape(rows, columns):
    """
    Parse the number of overscan rows and columns into indices that can be
    used to reshape arrays.

    :param rows: The number of overscan rows.
    :type rows: int

    :param columns: The number of overscan columns.
    :type columns: int
    """
    if rows == 0 and columns == 0:
        return (0, 0)

    if rows == 0 and columns > 0:
        return (-1, columns)

    if rows > 0 and columns == 0:
        return (rows, -1)

    if rows > 0 and columns > 0:
        return (rows, columns)
e3b6bb9c5d5837e9628fcd226f2484d44d4ce454
19,992
def get_window_context(idx, tree, size):
    """Return a list of words within a 2*size window around the idx position."""
    return [node.token
            for node in tree[max(0, idx-size) : idx] + tree[idx+1 : idx+size+1]]
7e61105f278757505dbcff0f98a3f144844476be
19,997
def value_in(value, choices):
    """Raise an exception if a value doesn't match one of the given choices."""
    if value not in choices:
        raise ValueError("Expected one of %s, received %r." % (
            ", ".join([repr(choice) for choice in choices]), value))  # pragma: no cover
    return value
98abbbcc09c3b042a4ccd359986ae9325d96c442
19,999
import collections


def cal_frac_aneu(ploidy, ploidy_list):
    """
    Calculate the fraction of each ploidy in ploidy_list

    Examples
    --------
    >>> ploidy = [0, 1, 2, 4, 4]
    >>> ploidy_list = [0, 1, 2, 3, 4]
    >>> cal_frac_aneu(ploidy, ploidy_list)
    [0.2, 0.2, 0.2, 0, 0.4]

    Parameters
    ----------
    ploidy : a list of ploidy
    ploidy_list : a list of ploidy

    Returns
    -------
    a list of ploidy fractions
    """
    total = len(ploidy)
    counts = collections.Counter(ploidy)
    frac = []
    for dos in ploidy_list:
        if counts[dos]:
            frac.append(round(counts[dos]/total, 2))
        else:
            frac.append(0)
    return frac
3940e750d2cd309ba6e341c0f13c42e9b911e0ea
20,000
def build_texts_from_movies(path_to_movie_dat):
    """
    Extracts genre text from movies.dat to create semantic embeddings

    :param path_to_movie_dat:
    :return: dict of text list keyed by movie_id
    """
    texts = {}
    with open(path_to_movie_dat, "r", encoding="ISO-8859-1") as f:
        for line in f:
            movie_id, title_and_year, genres = line.strip("\n").split("::")

            title = title_and_year[:-7]
            # year = title_and_year[-5:-1]

            sorted_genres = sorted(genres.split("|"))
            texts[movie_id] = [title] + sorted_genres
    return texts
e98a8e5eedee7a983431246f0e4968d6f4daaa40
20,002
import copy


def override_repo_refs(repos, override_ref=None, overrides=None):
    """
    Returns a new `repos` dictionary with the CLI overrides applied.
    """
    overrides = overrides or {}
    if not override_ref and not overrides:
        return repos

    repos_copy = copy.deepcopy(repos)
    for repo, repo_data in repos.items():
        if not repo_data:
            continue
        release_data = repo_data.get("openedx-release")
        if not release_data:
            continue
        local_override = overrides.get(str(repo), override_ref)
        if local_override:
            repos_copy[repo]["openedx-release"]["ref"] = local_override
    return repos_copy
83df5d47b6ceba385d05e2c94341fec9c559ea0d
20,003
import csv


def get_entities(entities_file):
    """Returns an array of Medical Entities

    :param entities_file: Entities file csv
    :return: Array<[term:str, score:int]>
    """
    entities = []
    with open(entities_file, encoding='utf8') as ds1_file:
        csv_reader = csv.reader(ds1_file, delimiter=',')
        for row in csv_reader:
            entities.append([str(row[0]), row[1]])
    return entities
038b3b04aa43906149e7d6799ac66ba9f7d719c0
20,005
def secondsToMMSS(secs):
    """Convert number of seconds to the string ``mm:ss``.

    Note: If the number of minutes is greater than 100, it will
    be displayed as such.

    Args:
        secs (int): Number of seconds.

    Returns
        str: String in the format of ``mm:ss``.
    """
    secs = int(secs)
    minutes, seconds = divmod(secs, 60)
    return '{:02d}:{:02d}'.format(minutes, seconds)
6cf218ac1b45e2a338bd97bc28472f08077cbcf2
20,009
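secondsToMMSS above zero-pads both fields but lets minutes exceed two digits; an editor-added check (not a dataset row):

# Editor-added usage check for secondsToMMSS.
assert secondsToMMSS(125) == '02:05'
assert secondsToMMSS(6000) == '100:00'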
def pad_to_max_seq_length(ls, max_seq_length, pad_idx=0, pad_right=True, check=True):
    """Apply padding to an input sequence.

    Args:
        ls: sequence to pad.
        max_seq_length: max length up to which to apply padding.
        pad_idx: element to use for padding.
        pad_right: True if padding is applied to right side of sequence,
            False to pad on left side.
        check: True if result length should be checked as under the max
            sequence length.

    Returns:
        Sequence with specified padding applied.
    """
    padding = [pad_idx] * (max_seq_length - len(ls))
    if pad_right:
        result = ls + padding
    else:
        result = padding + ls

    if check:
        assert len(result) == max_seq_length
    return result
687954fcda10e14e93b994df2c43e320b29731fd
20,011
def get_languages(project_data):
    """
    Get the available languages for the crowdin project.

    Parameters
    ----------
    project_data : dict
        Crowdin project data.

    Returns
    -------
    dict
        Available languages on crowdin.
    """
    result = {}
    for language in project_data["targetLanguages"]:
        result[language["locale"]] = {"id": language["id"], "name": language["name"]}
    return result
87d4c845e2d64e90d93add2600eae99c25960864
20,014
def name(who):
    """Return the name of a player."""
    return "Player {0}".format(who)
ab75e42a9dc70217a148475d32779da0b1e81d75
20,018
def tv(five_year_fcf, wacc, g=0.03):
    """Returns terminal value using Gordon Growth formula."""
    last_fcf = five_year_fcf[-1]
    return last_fcf * (1 + g) / (wacc - g)
965175a6d83687cf22d477d6e2aa4755fc48228e
20,020
import re


def match_scene_name(scene_name):
    """
    Args:
        scene_name (str): FloorPlanXX-<random_seed(int) | default>

    Returns a tuple of scene name and seed (or 'default') if we can
    match the format.
    """
    m = re.match("^FloorPlan[0-9]+-([0-9]+|default)$", scene_name)
    if m is not None:
        return m.group().split("-")
    return None
d8d2de675e102984007e735992fb9c0696d4845c
20,023
def design_thick_spherical_transform_lens(n, w, f):
    """Choose radius of curvature and thickness for a Fourier transform
    with given focal length and working distance.

    Args:
        n: refractive index
        w: working distance
        f: transform focal length

    Returns:
        roc: radius of curvature
        d: center thickness
    """
    # Derivation p68 Dane's Fathom logbook #2
    roc = f*(n - 1) + w*(n - 1)
    d = (f - w)*n*roc/(f*(n - 1))
    return roc, d
a3ebcdd19cb95a4d369252984e7342a6ee338c48
20,024
def arb_callable(n):
    """Arb callable for testing."""
    return n * 2
1cac3ad2ab9ca74197e91d8404cf21f65b2aa74d
20,025
def _get_header_info(line):
    """
    Get number of sequences and length of sequence
    """
    header_parts = line.split()
    num_seqs, length = list(map(int, header_parts[:2]))
    is_interleaved = len(header_parts) > 2
    return num_seqs, length, is_interleaved
be7fc522fb8d195af6e45c93e42867aecbd23fb6
20,026
import json


def loadJsonArgs(fn):
    """
    Load the .json file containing input values

    Args:
        fn: file name

    Returns:
        args: argument dictionary
    """
    with open(fn) as data_file:
        data = json.load(data_file)

    args = {}
    args['patient_id'] = data['Patient ID']
    args['start_phase'] = data['Start Phase']
    args['total_phase'] = data['Total Phase']
    args['im_name'] = data["Image Name"]
    args['model_output'] = data["Output Surface Model Name"]
    args['seg_name'] = data["Segmentation Name"]
    args['im_top_dir'] = data["Image Top Dir"]
    args['seg_folder_name'] = data["Segmentation Folder Name"]
    args['im_folder_name'] = data["Image Folder Name"]
    args['out_dir'] = data["Output Dir Name"]
    args['num_interpolation'] = data["Number of Interpolations"]
    args['num_cycle'] = data["Number of Cardiac Cycles"]
    args['duration'] = data["Cycle Duration (s)"]
    args['edge_size'] = data["Mesh Size"]
    args['mask_folder_name'] = data["Mask Folder Name"]
    return args
51e8a0fdaf53836cf831701ff6921479a8d8e03f
20,030
from pathlib import Path


def get_cwd() -> Path:
    """Determine the current working directory.

    :return: The appropriate current working directory path
    :rtype: ~pathlib.Path
    """
    return Path.cwd()
3db9c2b613f02f0678cd0f994ad12701b9229a5a
20,032
def bswap(data):
    """
    Byteswap data
    """
    return data.byteswap()
1d0bf90f948e441514aa7bc0aff0cbe2ecd8283b
20,034
import json


def _GetKeysAsDict(keys_input):
    """Converts |keys_input| into a dictionary.

    Args:
        keys_input: A dictionary or a string pointing to a JSON file. The
            contents of either should be Skia Gold config data.

    Returns:
        A dictionary containing the Skia Gold config data.
    """
    if isinstance(keys_input, dict):
        return keys_input
    assert isinstance(keys_input, str)
    with open(keys_input) as f:
        return json.load(f)
db5d8e1bc08d326754163bd183f51dea9e7ad499
20,038
def query(conn, string: str):
    """Perform a query on titanic database and return result."""
    curs = conn.cursor()
    curs.execute(string)
    result = curs.fetchall()
    return result
11c51879cad2cd99d64a3c64dbafe5c95edf6fdd
20,041
import torch


def safe_log(x, eps=1e-7):
    """Avoid taking the log of a non-positive number."""
    safe_x = torch.where(x <= eps, eps, x.double())
    return torch.log(safe_x)
5d9029d51ee667bc69b0370e49e91fdf5f096c31
20,042
def puncify(s):
    """Replaces unicode characters with the appropriate ASCII punctuation"""
    return (s.replace(u'\xa0', u' ')
             .replace(u'\u201c', '"')
             .replace(u'\u201d', '"')
             .replace(u'\u2019', "'")
             .replace(u"&amp;", '&')
             .replace(u'\u2026', '...'))
670db5ce03943365bd9683e4c75589418246a497
20,044
import re


def extract_episode(text, seg_search, eg_search):
    """
    Extract episode number from metadata.

    :param str text: Metadata containing episode number.
    :param str seg_search: Regex for a `Super Easy German` episode.
    :param str eg_search: Regex for an `Easy German` episode.
    :return: Episode number and type.
    :rtype: dict
    """
    seg_match = re.search(seg_search, text, re.IGNORECASE)
    if seg_match:
        return {
            'type': 'super_easy_german',
            'number': seg_match.group().strip().replace('(', '').replace(')', '')
        }

    eg_match = re.search(eg_search, text, re.IGNORECASE)
    if eg_match:
        return {
            'type': 'easy_german',
            'number': eg_match.group().strip()
        }
2215ba4b1aacbf8f3cff8e02bd4814a279e147ca
20,045
def bslice(high, low=None):
    """
    Represents: the bits range [high : low] of some value.

    If low is not given, represents just [high] (only 1 bit), which is
    the same as [high : high].
    """
    if low is None:
        low = high
    return slice(low, high + 1)
d3a3085f8da638ef63c7d0f65605543f6f3605b7
20,046
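bslice above maps an inclusive bit range to a Python slice; an editor-added check (not a dataset row):

# Editor-added usage check for bslice.
assert bslice(7, 4) == slice(4, 8)
assert bslice(3) == slice(3, 4)  # single bit
assert list(range(8))[bslice(7, 4)] == [4, 5, 6, 7]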
def serialize_file_list(files: list, active_item_index: int = -1):
    """Returns a serialized file list, which JRiver requires in some API calls.

    These are not documented further, but form a string of semicolon-separated
    values. These are, in order:

    [0] The value '2', stating a serialization version. Only 2 is supported these days.
    [1] The number of included keys
    [2] The active element (?), -1 for none
    [3]..[len(files) + 2]: The keys of the files.
    """
    result = "2;" + str(len(files)) + ";" + str(active_item_index)
    for file in files:
        result += ";" + str(file["Key"])
    return result
651df9d8cd228f10c9e0324a2d4b9cfc2e7adf10
20,049