content: string (lengths 39 to 14.9k)
sha1: string (length 40)
id: int64 (0 to 710k)
def generate_modal(title, callback_id, blocks):
    """
    Generate a modal view object using Slack's BlockKit

    :param title: Title to display at the top of the modal view
    :param callback_id: Identifier used to help determine the type of modal view in future responses
    :param blocks: Blocks to add to the modal view
    :return: View object (Dictionary)
    """
    modal = {
        "type": "modal",
        "callback_id": callback_id,
        "title": {
            "type": "plain_text",
            "text": title,
            "emoji": False
        },
        "submit": {
            "type": "plain_text",
            "text": "Submit",
            "emoji": False
        },
        "close": {
            "type": "plain_text",
            "text": "Cancel",
            "emoji": False
        },
        "blocks": blocks
    }
    return modal
e0caeec1ab1cf82ed6f02ec77a984dcb25e329f5
3,399
def linkCount(tupleOfLists, listNumber, lowerBound, upperBound):
    """Counts the number of links in one of the lists passed.

    This function is a speciality function to aid in calculating
    statistics involving the number of links that lie in a given range.
    It is primarily intended as a private helper function. The
    parameters are:

    tupleOfLists -- usually a linkograph entry.

    listNumber -- a list of the indices in entry that should be considered.

    lowerBound -- the lowest index that should be considered.

    upperBound -- the highest index that should be considered.

    Example: a typical tupleOfLists is ({'A', 'B'}, {1,2}, {4,5})
    a listNumber of [1] would only consider the links in {1,2}, a
    listNumber of [2] would only consider the links in {4,5} and a
    listNumber of [1,2] would consider the links in both {1,2} and {4,5}.
    """
    summation = 0
    for index in listNumber:
        summation += len({link for link in tupleOfLists[index]
                          if lowerBound <= link <= upperBound})
    return summation
239fd8d3c01fe6c88444cfa7369459e3c76005dc
3,401
def my_vtk_grid_props(vtk_reader):
    """
    Get grid properties from vtk_reader instance.

    Parameters
    ----------
    vtk_reader: vtk Reader instance
        vtk Reader containing information about a vtk-file.

    Returns
    ----------
    step_x : float
        For regular grid, stepsize in x-direction.
    step_y : float
        For regular grid, stepsize in y-direction.
    npts_x : float
        Number of cells in x-direction.
    npts_y : float
        Number of cells in y-direction.
    low_m_x : float
        Middle of first x cell
    high_m_x : float
        Middle of last x cell
    low_m_y : float
        Middle of first y cell
    high_m_y : float
        Middle of last y cell
    low_x : float
        Edge of first x cell
    high_x : float
        Edge of last x cell
    low_y : float
        Edge of first y cell
    high_y : float
        Edge of last y cell

    Notes
    ----------
    0: step_x
    1: step_y
    2: npts_x
    3: npts_y
    4: low_m_x   - Middle of cells: first x cell
    5: high_m_x  - Middle of cells: last x cell
    6: low_m_y   - Middle of cells: first y cell
    7: high_m_y  - Middle of cells: last y cell
    8: low_x     - Edge of cells: first x cell
    9: high_x    - Edge of cells: last x cell
    10: low_y    - Edge of cells: first y cell
    11: high_y   - Edge of cells: last y cell
    """
    vtk_output = vtk_reader.GetOutput()

    # Read attributes of the vtk-Array
    # num_cells = vtk_output.GetNumberOfCells()
    # num_points = vtk_output.GetNumberOfPoints()
    # whole_extent = vtk_output.GetExtent()
    grid_bounds = vtk_output.GetBounds()
    grid_dims = vtk_output.GetDimensions()

    # Grid information
    step_x = (grid_bounds[1] - grid_bounds[0]) / (grid_dims[0] - 1)
    step_y = (grid_bounds[3] - grid_bounds[2]) / (grid_dims[1] - 1)

    if grid_bounds[0] == 0.0:  # CELLS
        npts_x = grid_dims[0] - 1
        npts_y = grid_dims[1] - 1

        low_m_x = grid_bounds[0] + 0.5 * step_x
        high_m_x = grid_bounds[1] - 0.5 * step_x
        low_m_y = grid_bounds[2] + 0.5 * step_y
        high_m_y = grid_bounds[3] - 0.5 * step_y

        low_x = grid_bounds[0]
        high_x = grid_bounds[1]
        low_y = grid_bounds[2]
        high_y = grid_bounds[3]
    else:  # POINTS
        npts_x = grid_dims[0]
        npts_y = grid_dims[1]

        low_m_x = grid_bounds[0]
        high_m_x = grid_bounds[1]
        low_m_y = grid_bounds[2]
        high_m_y = grid_bounds[3]

        low_x = grid_bounds[0] - 0.5 * step_x
        high_x = grid_bounds[1] + 0.5 * step_x
        low_y = grid_bounds[2] - 0.5 * step_y
        high_y = grid_bounds[3] + 0.5 * step_y

    return step_x, step_y, \
        npts_x, npts_y, \
        low_m_x, high_m_x, low_m_y, high_m_y, \
        low_x, high_x, low_y, high_y
26ef8a51648ea487372ae06b54c8ccf953aeb414
3,408
def state_space_model(A, z_t_minus_1, B, u_t_minus_1):
    """
    Calculates the state at time t given the state at time t-1 and
    the control inputs applied at time t-1
    """
    state_estimate_t = (A @ z_t_minus_1) + (B @ u_t_minus_1)
    return state_estimate_t
0e04207028df8d4162c88aad6606e792ef618f5a
3,409
def get_n_runs(slurm_array_file):
    """Reads the run.sh file to figure out how many conformers or rotors were meant to run"""
    with open(slurm_array_file, 'r') as f:
        for line in f:
            if 'SBATCH --array=' in line:
                token = line.split('-')[-1]
                n_runs = 1 + int(token.split('%')[0])
                return n_runs
    return 0
5574ef40ef87c9ec5d9bbf2abd7d80b62cead2ab
3,412
def update_image_version(name: str, new_version: str):
    """returns the passed image name modified with the specified version"""
    parts = name.rsplit(':', 1)
    return f'{parts[0]}:{new_version}'
cde798361a6c74d22f979fe013e963c46028a7e6
3,425
def _gen_roi_func_constant(constant_roi):
    """
    Return a RoI function which returns a constant radius.

    See :py:func:`map_to_grid` for a description of the parameters.
    """
    def roi(zg, yg, xg):
        """ constant radius of influence function. """
        return constant_roi

    return roi
c7c69cf32fb289d5e9c9497474989aa873a231ba
3,427
import torch


def load_checkpoints(checkpoint_name):
    """
    Load a pretrained checkpoint.
    :param checkpoint_name: checkpoint filename
    :return: model state_dict, optimizer state_dict, epoch, time elapsed,
             source vocabulary, target vocabulary
    """
    # Get checkpoint from file
    checkpoint = torch.load(checkpoint_name, map_location=torch.device('cpu'))

    # The epoch when training has been left
    epoch = checkpoint['epoch']

    # The time elapsed during training
    time_elapsed = checkpoint['time_elapsed']

    # Get state_dict of the model
    model_state_dict = checkpoint['model_state_dict']

    # Get the state_dict of the optimizer
    optimizer_state_dict = checkpoint['optimizer_state_dict']

    # Get source and target language vocabularies
    src_vocabulary = checkpoint['src_vocabulary']
    tgt_vocabulary = checkpoint['tgt_vocabulary']

    return model_state_dict, optimizer_state_dict, epoch, time_elapsed, src_vocabulary, tgt_vocabulary
e81f094c811d497504fd1f93a8ee537e6b122bd6
3,430
def _extract_data(prices, n_markets):
    """ Extract the open, close, high and low prices from the price matrix. """
    os = prices[:, :, :n_markets]
    cs = prices[:, :, n_markets:2*n_markets]
    hs = prices[:, :, 2*n_markets:3*n_markets]
    ls = prices[:, :, 3*n_markets:4*n_markets]
    return os, cs, hs, ls
154af0c8270fbe664b3dd5d07a724b753ff02040
3,431
import base64
import hmac
import hashlib


def sign_v2(key, msg):
    """
    AWS version 2 signing by sha1 hashing and base64 encode.
    """
    return base64.b64encode(hmac.new(key, msg.encode("utf-8"), hashlib.sha1).digest())
1aa54cc2cd3ce20ad5222a889754efda2f4632c3
3,435
def find_hcf(a, b):
    """ Finds the Highest Common Factor of two numbers """
    # print('HCF : ', a, b)
    if b == 0:
        return a
    return find_hcf(b, a % b)
818bbc05ab9262e8fd1e8975daf68ca3e0fa6a8b
3,436
def stripper(reply: str, prefix=None, suffix=None) -> str:
    """This is a helper function used to strip off reply prefix and
    terminator. Standard Python str.strip() doesn't work reliably because
    it operates on character-by-character basis, while prefix/terminator
    is usually a group of characters.

    Args:
        reply: String to be stripped.
        prefix: Substring to remove from the beginning of the line.
        suffix: Substring to remove from the end of the line.

    Returns:
        (str): Naked reply.
    """
    if prefix is not None and reply.startswith(prefix):
        reply = reply[len(prefix):]
    if suffix is not None and reply.endswith(suffix):
        reply = reply[:-len(suffix)]
    return reply
b48281a0dedd5d7f3d476943f12ac49720e67476
3,442
def advertisement_data_complete_builder(list_of_ad_entries):
    """
    Generate a finalized advertisement data value from a list of AD entries that can be passed
    to the BLEConnectionManager to set the advertisement data that is sent during advertising.

    :param list_of_ad_entries: List of AD entries (can be built using blesuite.utils.gap_utils.advertisement_data_entry_builder)
    :type list_of_ad_entries: [str,]
    :return: Finalized AD data
    :rtype: str
    """
    data = ""
    for ad in list_of_ad_entries:
        length = len(ad)
        ad_string = chr(length) + ad
        data = data + ad_string
    return data
c0f9040c36216cb519706c347d6644405fae0b7f
3,445
import math


def quantize(x):
    """convert a float in [0,1] to an int in [0,255]"""
    y = math.floor(x * 255)
    return y if y < 256 else 255
b941a11d0d6af3162c964568e2d97c8d81cd1442
3,446
def get_package_version() -> str:
    """Returns the package version."""
    metadata = importlib_metadata.metadata(PACKAGE_NAME)  # type: ignore
    version = metadata["Version"]
    return version
a24286ef2a69f60871b41eda8e5ab39ba7f756c0
3,449
def safe_name(dbname):
    """Returns a database name with non letter, digit, _ characters removed."""
    char_list = [c for c in dbname if c.isalnum() or c == '_']
    return "".join(char_list)
2ce4978c3467abaddf48c1d1ab56ed773b335652
3,450
def concatenate_shifts(shifts):
    """ Take the shifts, which are relative to the previous shift,
    and sum them up so that all of them are relative to the first."""
    # the first shift is 0,0,0
    for i in range(2, len(shifts)):  # we start at the third
        s0 = shifts[i-1]
        s1 = shifts[i]
        s1.x += s0.x
        s1.y += s0.y
        s1.z += s0.z
    return shifts
f4b0a41db1db78e3b5f25ca198fdb6cebd6476ca
3,451
from typing import Any


def next_key(basekey: str, keys: dict[str, Any]) -> str:
    """Returns the next unused key for basekey in the supplied dictionary.

    The first try is `basekey`, followed by `basekey-2`, `basekey-3`, etc
    until a free one is found.
    """
    if basekey not in keys:
        return basekey
    i = 2
    while f"{basekey}-{i}" in keys:
        i = i + 1
    return f"{basekey}-{i}"
e1da51c79fd465088294e053fdc970934268211b
3,452
def join_b2_path(b2_dir, b2_name):
    """
    Like os.path.join, but for B2 file names where the root directory is called ''.

    :param b2_dir: a directory path
    :type b2_dir: str
    :param b2_name: a file name
    :type b2_name: str
    """
    if b2_dir == '':
        return b2_name
    else:
        return b2_dir + '/' + b2_name
20f4e6e54f7f3b4a1583b503d4aa2d8995318978
3,454
import csv


def read_loss_file(path):
    """Read the given loss csv file and process its data into lists that can
    be plotted by matplotlib.

    Args:
        path (string): The path to the file to be read.

    Returns:
        A list of lists, one list for each subnetwork containing the loss
        values over time.
    """
    with open(path, 'r') as csvfile:
        reader = csv.reader(csvfile)

        data = []
        for row in reader:
            # Ignore the epoch numbers
            if len(data) == 0:
                data = [[] for _ in row[1:]]
            for i in range(1, len(row)):
                data[i-1].append(float(row[i]))

    return data
8e861f0bf46db5085ea2f30a7e70a4bdfa0b9697
3,463
import torch


def make_observation_mapper(claims):
    """Make a dictionary of observations.

    Parameters
    ----------
    claims: pd.DataFrame

    Returns
    -------
    observation_mapper: dict
        a dictionary that maps each rv to its observed value
    """
    observation_mapper = dict()

    for c in claims.index:
        s = claims.iloc[c]['source_id']
        observation_mapper[f'b_{s}_{c}'] = torch.tensor(
            claims.iloc[c]['value'])
    return observation_mapper
43052bd9ce5e1121f3ed144ec48acf20ad117313
3,467
def simplify(n):
    """Remove decimal places."""
    return int(round(n))
9856c8f5c0448634956d1d05e44027da2f4ebe6a
3,468
import math


def VSphere(R):
    """
    Volume of a sphere of radius R.
    """
    return 4. * math.pi * R * R * R / 3.
9e99d19683d9e86c2db79189809d24badccc197b
3,470
import re


def decode_Tex_accents(in_str):
    """Converts a string containing LaTex accents (i.e. "{\\`{O}}") to ASCII
    (i.e. "O"). Useful for correcting author names when bib entries were
    queried from web via doi

    :param in_str: input str to decode
    :type in_str: str
    :return: corrected string
    :rtype: str
    """
    # replaces latex accents with ascii letter (no accent)
    pat = "\{\\\\'\{(\w)\}\}"
    out = in_str
    for x in re.finditer(pat, in_str):
        out = out.replace(x.group(), x.groups()[0])

    # replace latex {\textsinglequote} with underscore
    out = out.replace('{\\textquotesingle}', "_")

    # replace actual single quotes with underscore for bibtex compatibility
    out = out.replace("'", '_')

    return out
2a4bd71b53cdab047a1ddd1e0e6fd6e9c81b0e0a
3,473
def answer_question_interactively(question):
    """Returns True or False for a yes/no question posed to the user"""
    while True:
        answer = input(question + '? [Y or N]: ')
        if answer.lower() == 'y':
            return True
        elif answer.lower() == 'n':
            return False
52a123cc2237441de3b0243da268e53b7cc0d807
3,476
def other_players(me, r):
    """Return a list of all players but me, in turn order starting after me"""
    return list(range(me+1, r.nPlayers)) + list(range(0, me))
5c2d2b03bfb3b99eb4c347319ccaaa3fc495b6c4
3,477
def to_bytes(obj, encoding='utf-8', errors='strict'):
    """Makes sure that a string is a byte string.

    Args:
        obj: An object to make sure is a byte string.
        encoding: The encoding to use to transform from a text string to
            a byte string. Defaults to using 'utf-8'.
        errors: The error handler to use if the text string is not
            encodable using the specified encoding. Any valid codecs error
            handler may be specified.

    Returns: Typically this returns a byte string.
    """
    if isinstance(obj, bytes):
        return obj
    return bytes(obj, encoding=encoding, errors=errors)
4f8a0dcfdcfd3e2a77b5cbeedea4cb2a11acd4c1
3,480
def midnight(date):
    """Returns a copy of a date with the hour, minute, second, and
    millisecond fields set to zero.

    Args:
        date (Date): The starting date.

    Returns:
        Date: A new date, set to midnight of the day provided.
    """
    return date.replace(hour=0, minute=0, second=0, microsecond=0)
b92086dd9d99a4cea6657d37f40e68696ad41f7c
3,483
from typing import Callable
from typing import Any
from typing import Sequence


def foldr(fun: Callable[[Any, Any], Any], acc: Any, seq: Sequence[Any]) -> Any:
    """Implementation of foldr in Python3.

    This is an implementation of the right-handed fold function from
    functional programming. If the list is empty, we return the accumulator
    value. Otherwise, we recurse by applying the function which was passed
    to the foldr to the head of the iterable collection and the foldr called
    with fun, acc, and the tail of the iterable collection.

    Below are the implementations of the len and sum functions using foldr
    to demonstrate how foldr function works.

    >>> foldr((lambda _, y: y + 1), 0, [0, 1, 2, 3, 4])
    5
    >>> foldr((lambda x, y: x + y), 0, [0, 1, 2, 3, 4])
    10

    foldr takes the second argument and the last item of the list and
    applies the function, then it takes the penultimate item from the end
    and the result, and so on.
    """
    return acc if not seq else fun(seq[0], foldr(fun, acc, seq[1:]))
5648d8ce8a2807270163ebcddad3f523f527986e
3,484
def filter_chants_without_volpiano(chants, logger=None):
    """Exclude all chants with an empty volpiano field"""
    has_volpiano = chants.volpiano.isnull() == False
    return chants[has_volpiano]
3f03bbf3f247afd3a115442e8121a773aa90fb56
3,485
def sec2hms(sec):
    """
    Convert seconds to hours, minutes and seconds.
    """
    hours = int(sec / 3600)
    minutes = int((sec - 3600 * hours) / 60)
    seconds = int(sec - 3600 * hours - 60 * minutes)
    return hours, minutes, seconds
efea3a641c5f13313adb571c201cc25d2895757e
3,492
def do_json_counts(df, target_name):
    """ count of records where name=target_name in a dataframe with column 'name' """
    return df.filter(df.name == target_name).count()
c4b0cb52f28372a7d53a92984b3212c66c1556ab
3,495
def count_subset_sum_recur(arr, total, n):
    """Count subsets given sum by recursion.

    Time complexity: O(2^n), where n is length of array.
    Space complexity: O(n), for the recursion stack.
    """
    if total < 0:
        return 0
    if total == 0:
        return 1
    if n < 0:
        return 0

    if total < arr[n]:
        return count_subset_sum_recur(arr, total, n - 1)
    else:
        n_subsets_in = count_subset_sum_recur(arr, total - arr[n], n - 1)
        n_subsets_out = count_subset_sum_recur(arr, total, n - 1)
        return n_subsets_in + n_subsets_out
981b83014e75122dea814ace5b34b18f9803c3ad
3,499
def get_indexes_from_list(lst, find, exact=True):
    """
    Helper function that searches for an element in a list and returns a list
    of indexes for the matching elements.

    E.g.
    get_indexes_from_list([1,2,3,1,5,1], 1) returns [0,3,5]
    get_indexes_from_list(['apple','banana','orange','lemon'], 'orange') -> returns [2]
    get_indexes_from_list(['apple','banana','lemon',['orange', 'peach']], 'orange') -> returns []
    get_indexes_from_list(['apple','banana','lemon',['orange', 'peach']], ['orange'], False) -> returns [3]

    Parameters
    ----------
    lst: list
        The list to look in

    find: any
        the element to find, can be a list

    exact: bool
        If False then an index is returned if find is contained in the
        lst-item, otherwise only if find == lst-item

    Returns
    -------
    list of int
    """
    if exact == True:
        return [index for index, value in enumerate(lst) if value == find]
    else:
        if isinstance(find, list):
            return [index for index, value in enumerate(lst) if set(find).intersection(set(value))]
        else:
            return [index for index, value in enumerate(lst) if find in value]
416d94de975603a60bf41974b8564cd868e503c0
3,502
from typing import Counter


def find_duplicates(list_to_check):
    """
    This finds duplicates in a list of values of any type and then returns
    the values that are duplicates. Given Counter only works with hashable
    types, ie it can't work with lists, create a tuple of the lists and then
    count if the list_to_check contains un-hashable items

    :param list_to_check: A list of values with potential duplicates within it
    :type list_to_check: list
    :return: The values that were duplicates
    :rtype: list
    """
    try:
        counted_list = Counter(list_to_check)
    except TypeError:
        counted_list = Counter([tuple(x) for x in list_to_check])
    return [key for key in counted_list if counted_list[key] > 1]
1d608a70e7fb9be2001c73b72d3c1b62047539b5
3,504
from typing import Any


def none_to_default(field: Any, default: Any) -> Any:
    """Convert None values into default values.

    :param field: the original value that may be None.
    :param default: the new, default, value.
    :return: field; the new value if field is None, the old value otherwise.
    :rtype: any
    """
    return default if field is None else field
894d71c2cc89b02dc14fd7ddcd3a949bdc336692
3,505
def gen_input_code(question, id):
    """
    Returns the html code for rendering the appropriate input field
    for the given question.
    Each question is identified by name=id
    """
    qtype = question['type']
    if qtype == 'text':
        return """<input type="text" class="ui text" name="{0}" placeholder="your answer..." />""".format(id)
    elif qtype == 'code':
        return '<textarea class="ui text" name="{0}"></textarea>'.format(id)
    else:
        button_template = '<input type="radio" name="{0}" value="{1}"> {1}<br>'
        code = ''
        for choice in question['choices']:
            code = code + button_template.format(id, choice)
        return code
b76bea45c0ce847d664a38694732ef0b75c2a53c
3,508
def list_inventory(inventory):
    """
    :param inventory: dict - an inventory dictionary.
    :return: list of tuples - key, value pairs from the inventory dictionary,
             including only items with a quantity greater than zero.
    """
    result = []
    for element, quantity in inventory.items():
        if quantity > 0:
            result.append((element, quantity))
    return result
264f8cde11879be8ace938c777f546974383122c
3,509
import hashlib


def get_fingerprint(file_path: str) -> str:
    """
    Calculate a fingerprint for a given file.
    :param file_path: path to the file that should be fingerprinted
    :return: the file fingerprint, or an empty string
    """
    try:
        block_size = 65536
        hash_method = hashlib.md5()
        with open(file_path, 'rb') as input_file:
            buf = input_file.read(block_size)
            while buf:
                hash_method.update(buf)
                buf = input_file.read(block_size)

        return hash_method.hexdigest()
    except Exception:
        # if the file cannot be hashed for any reason, return an empty fingerprint
        return ''
b0ee4d592b890194241aaafb43ccba927d13662a
3,511
def compute_score_for_coagulation(platelets_count: int) -> int:
    """
    Computes score based on platelets count (unit is number per microliter).
    """
    if platelets_count < 20_000:
        return 4
    if platelets_count < 50_000:
        return 3
    if platelets_count < 100_000:
        return 2
    if platelets_count < 150_000:
        return 1
    return 0
dc6e9935555fbb0e34868ce58a8ad8bc77be8b0c
3,514
def obtener_atletas_pais(atletas: list, pais_interes: str) -> list:
    """
    Builds a list with the information of the athletes from the given country,
    regardless of the year in which they competed.

    Parameters:
        atletas: list of dictionaries with each athlete's information.
        pais_interes: str.
    Returns:
        atletas_pais: list with the dictionaries of the athletes from that country.
        Each athlete's dictionary: {'nombre': str, 'evento': str, 'anio': int}.
    """
    # Initialize the list of athletes from the country.
    atletas_pais = list()

    # Iterate over the list of athletes.
    for cada_atleta in atletas:
        # Fields of the current athlete.
        anio_actual = cada_atleta['anio']
        nombre_actual = cada_atleta['nombre']
        evento_actual = cada_atleta['evento']
        pais_actual = cada_atleta['pais']

        # Check whether the athlete belongs to the country of interest.
        if pais_actual == pais_interes:
            # Add the athlete's dictionary to the list of athletes.
            atletas_pais.append({'nombre': nombre_actual,
                                 'evento': evento_actual,
                                 'anio': anio_actual})

    return atletas_pais
4b03364a76af4e7818f977731b259fdfee6817ee
3,525
def oddify(n):
    """Ensure number is odd by incrementing if even"""
    return n if n % 2 else n + 1
dee98063cb904cf462792d15129bd90a4b50bd28
3,527
def concatenation(clean_list):
    """
    Concatenation example. Takes the processed list for your emails and
    concatenates any elements that are currently separate that you may wish
    to have as one element, such as dates.
    E.g. ['19', 'Feb', '2018'] becomes ['19 Feb 2018']
    Works best if the lists are similar as it works by using the index of an
    element and joining it to other elements using a positive or negative index.
    """
    index_of_item = clean_list.index("your chosen item")
    clean_list[:index_of_item] = [' '.join(clean_list[:index_of_item])]
    # joins together every element from start to the index of the item

    # to join elements mid-list:
    another_index = clean_list.index("another item")
    # date concatenation
    date_start = another_index - 3
    date_end = another_index
    clean_list[date_start:date_end] = [' '.join(clean_list[date_start:date_end])]
    # joins the 3 elements before 'another item' index

    return clean_list
59b727f21e663f2836f6fe939f4979e9f7484f62
3,528
import torch


def model_predict(model, test_loader, device):
    """
    Predict data in dataloader using model
    """
    # Set model to eval mode
    model.eval()

    # Predict without computing gradients
    with torch.no_grad():
        y_preds = []
        y_true = []

        for inputs, labels in test_loader:
            inputs = inputs.to(device)
            labels = labels.to(device)

            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)

            y_preds.append(preds)
            y_true.append(labels)

        y_preds = torch.cat(y_preds).tolist()
        y_true = torch.cat(y_true).tolist()

    return y_preds, y_true
0b43a28046c1de85711f7db1b3e64dfd95f11905
3,530
def get_gifti_labels(gifti):
    """Returns labels from gifti object (*.label.gii)

    Args:
        gifti (gifti image): Nibabel Gifti image

    Returns:
        labels (list): labels from gifti object
    """
    # labels = img.labeltable.get_labels_as_dict().values()
    label_dict = gifti.labeltable.get_labels_as_dict()
    labels = list(label_dict.values())
    return labels
3a4915ed50132a022e29cfed4e90905d05209484
3,532
def eh_menor_que_essa_quantidade_de_caracters(palavra: str, quantidade: int) -> bool:
    """
    Checks whether the string is shorter than the given number of characters

    @param palavra: the word to check
    @param quantidade: the number of characters to compare against
    @return: True if the word is shorter than the given number of characters,
             False otherwise
    """
    tamanho = len(palavra)
    eh_menor = False
    if tamanho < quantidade:
        eh_menor = True
    return eh_menor
827469606b0b93b78b63686465decbbbc63b9673
3,535
def check_diamond(structure):
    """
    Utility function to check if the structure is fcc, bcc, hcp or diamond

    Args:
        structure (pyiron_atomistics.structure.atoms.Atoms): Atomistic Structure object to check

    Returns:
        bool: true if diamond else false
    """
    cna_dict = structure.analyse.pyscal_cna_adaptive(
        mode="total", ovito_compatibility=True
    )
    dia_dict = structure.analyse.pyscal_diamond_structure(
        mode="total", ovito_compatibility=True
    )
    return (
        cna_dict["CommonNeighborAnalysis.counts.OTHER"]
        > dia_dict["IdentifyDiamond.counts.OTHER"]
    )
ae082d6921757163cce3ddccbca15bf70621a092
3,536
def _rfc822_escape(header):
    """Return a version of the string escaped for inclusion in an
    RFC-822 header, by ensuring there are 8 spaces after each newline.
    """
    lines = header.split('\n')
    header = ('\n' + 8 * ' ').join(lines)
    return header
1a3cd02b057742db00ed741c40947cf4e19d1a86
3,540
def requiredOneInGroup(col_name, group, dm, df, *args):
    """
    If col_name is present in df, the group validation is satisfied.
    If not, it still may be satisfied, but not by THIS col_name.
    If col_name is missing, return col_name, else return None.
    Later, we will validate to see if there is at least one None
    (non-missing) value for this group.
    """
    if col_name in df.columns:
        # if the column name is present, return nothing
        return None
    else:
        # if the column name is missing, return column name
        return col_name
de46a4ef2f3e45381644db41d617d8c4c0845877
3,547
def persist(session, obj, return_id=True):
    """
    Use the session to store obj in database, then remove obj from session,
    so that on a subsequent load from the database we get a clean instance.
    """
    session.add(obj)
    session.flush()
    obj_id = obj.id if return_id else None  # save this before obj is expunged
    session.expunge(obj)
    return obj_id
a308931f418616417d10d3115b0f370352778533
3,548
def enforce_excel_cell_string_limit(long_string, limit):
    """
    Trims a long string. This function aims to address a limitation of CSV
    files, where very long strings which exceed the char cell limit of Excel
    cause weird artifacts to happen when saving to CSV.
    """
    trimmed_string = ''
    if limit <= 3:
        limit = 4
    if len(long_string) > limit:
        trimmed_string = (long_string[:(limit - 3)] + '...')
        return trimmed_string
    else:
        return long_string
9b8bcf4590dc73425c304c8d778ae51d3e3f0bf3
3,554
def feature_norm_ldc(df):
    """
    Process the features to obtain the standard metrics in LDC mode.
    """
    df['HNAP'] = df['HNAC'] / df['ICC_abs'] * 100
    df['TCC'] = (df['ICC_abs'] + df['DCC_abs']) / df['VOL']
    df['ICC'] = df['ICC_abs'] / df['VOL']
    df['DCC'] = df['DCC_abs'] / df['VOL']
    return df
60e3ef31c0be07179854de3191c2c75f4ec2cb4d
3,557
def urls_equal(url1, url2):
    """
    Compare two URLObjects, without regard to the order of their query strings.
    """
    return (
        url1.without_query() == url2.without_query()
        and url1.query_dict == url2.query_dict
    )
f2cbcf111cd5d02fa053fbd373d24b2dab047dfc
3,561
def bytes_to_ints(bs):
    """
    Convert a list of bytes to a list of integers.

    >>> bytes_to_ints([1, 0, 2, 1])
    [256, 513]
    >>> bytes_to_ints([1, 0, 1])
    Traceback (most recent call last):
        ...
    ValueError: Odd number of bytes.
    >>> bytes_to_ints([])
    []
    """
    if len(bs) % 2 != 0:
        raise ValueError("Odd number of bytes.")
    pairs = zip(bs[::2], bs[1::2])
    return [(a << 8) + b for a, b in pairs]
e8ac9ec973ff58973703e3e109da5b45d3f9d802
3,562
def from_binary(bin_data: str, delimiter: str = " ") -> bytes:
    """Converts binary string into bytes object"""
    if delimiter == "":
        data = [bin_data[i:i+8] for i in range(0, len(bin_data), 8)]
    else:
        data = bin_data.split(delimiter)

    data = [int(byte, 2) for byte in data]

    return bytes(data)
f16706da2d5b9ae5984a35a13ebd02ae94581153
3,567
def one_on_f_weight(f, normalize=True):
    """ Literally 1/f weight. Useful for fitting linspace data in logspace.

    Parameters
    ----------
    f: array
        Frequency
    normalize: boolean, optional
        Normalize the weight to [0, 1]. Defaults to True.

    Returns
    -------
    weight: array
        The 1/f weight.
    """
    weight = 1 / f
    if normalize:
        weight /= max(weight)
    return weight
54301aa7480e6f3520cbfcccfa463a2a02d34b9c
3,568
def get_at_content(sequence):
    """Return content of AT in sequence, as float between 0 and 1, inclusive."""
    sequence = sequence.upper()
    a_content = sequence.count('A')
    t_content = sequence.count('T')
    return round((a_content + t_content) / len(sequence), 2)
6316d29cdb9d7129f225f2f79a50485fb6919e32
3,570
import time


def get_current_date() -> str:
    """Forms a string to represent the current date using the time module"""
    if len(str(time.gmtime()[2])) == 1:
        current_date = str(time.gmtime()[0]) + '-' + str(time.gmtime()[1]) + '-0' + str(time.gmtime()[2])
    else:
        current_date = str(time.gmtime()[0]) + '-' + str(time.gmtime()[1]) + '-' + str(time.gmtime()[2])
    return current_date
480d44fc0153407960eacb875474fc02cb17c6c3
3,573
from datetime import datetime


def read_raw(omega):
    """Read the raw temperature, humidity and dewpoint values from an OMEGA iServer.

    Parameters
    ----------
    omega : :class:`msl.equipment.record_types.EquipmentRecord`
        The Equipment Record of an OMEGA iServer.

    Returns
    -------
    :class:`str`
        The serial number of the OMEGA iServer.
    :class:`dict`
        The data.
    """
    nprobes = omega.connection.properties.get('nprobes', 1)
    nbytes = omega.connection.properties.get('nbytes')

    error = None
    try:
        cxn = omega.connect()
        thd = cxn.temperature_humidity_dewpoint(probe=1, nbytes=nbytes)
        if nprobes == 2:
            thd += cxn.temperature_humidity_dewpoint(probe=2, nbytes=nbytes)
        cxn.disconnect()
    except Exception as e:
        error = str(e)
        thd = [None] * (nprobes * 3)

    now_iso = datetime.now().replace(microsecond=0).isoformat(sep='T')
    data = {
        'error': error,
        'alias': omega.alias,
        'datetime': now_iso,
        'report_number': None,
    }
    if len(thd) == 3:
        data.update({
            'temperature': thd[0], 'humidity': thd[1], 'dewpoint': thd[2]
        })
    else:
        data.update({
            'temperature1': thd[0], 'humidity1': thd[1], 'dewpoint1': thd[2],
            'temperature2': thd[3], 'humidity2': thd[4], 'dewpoint2': thd[5]
        })
    return omega.serial, data
105e07d26774288319459ebdc485d75c3a909212
3,582
def logical_array(ar):
    """Convert ndarray (int, float, bool) to array of 1 and 0's"""
    out = ar.copy()
    out[out != 0] = 1
    return out
74d96d519929ed7f5ddfd92b0fbcef4741a38359
3,586
def shifted(x):
    """Shift x values to the range [-0.5, 0.5)"""
    return -0.5 + (x + 0.5) % 1
c40585748120af5d0acd85e4fed49f0575a92a3d
3,592
def add_colon(in_str):
    """Add colon after every 4th character."""
    return ':'.join([in_str[i:i+4] for i in range(0, len(in_str), 4)])
fa4258aa9d684a087d2a81ae09a2702d6e58e3e1
3,598
def get_alt_pos_info(rec):
    """Returns info about the second-most-common nucleotide at a position.

    This nucleotide will usually differ from the reference nucleotide, but it
    may be the reference (i.e. at positions where the reference disagrees with
    the alignment's "consensus").

    This breaks ties arbitrarily.

    Parameters
    ==========
    rec: dict
        pysamstats record for a given position in an alignment produced
        by stat_variation().

    Returns
    =======
    (cov, alt nt freq, alt nt): tuple of (int, int, str)
        Describes the second-most-common nucleotide at a position.

        The first entry in this tuple is the (mis)match coverage at this
        position. This is an integer defined as the sum of A, C, G, T
        nucleotides at this position (note that this excludes degenerate
        nucleotides like N -- we could change this in the future if that'd be
        useful, I suppose). Note that this coverage could be zero, if no
        reads are aligned to this specific position.

        The second entry is the raw frequency of this nucleotide at this
        position: this will be an integer greater than or equal to 0. This is
        also referred to in the paper, etc. as alt(pos).

        The third entry is just the alternate nucleotide (one of A, C, G, T),
        represented as a string. This is returned for reference -- as of
        writing this isn't actually needed for Pleuk itself, but I have other
        code outside of Pleuk that benefits from this!
    """
    cov = rec["A"] + rec["C"] + rec["G"] + rec["T"]

    ordered_nts = sorted("ACGT", key=rec.get)

    # The literal nucleotide used in the numerator of freq(pos): one of A, C,
    # G, T
    alt_nt = ordered_nts[-2]

    # The raw frequency (in counts) of alt_nt. An integer >= 0.
    alt_nt_freq = rec[alt_nt]

    return (cov, alt_nt_freq, alt_nt)
3abe3fcbbf0ddbccb44025f2e476f77dc3e8abf9
3,599
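A small worked example may make the tuple ordering above concrete; the record below is a hypothetical pysamstats-style dict (illustrative values only, not from the dataset):

# Hypothetical record: counts of each nucleotide at one position.
rec = {"A": 10, "C": 3, "G": 0, "T": 1}

cov, alt_freq, alt_nt = get_alt_pos_info(rec)
# cov == 14 (10 + 3 + 0 + 1), alt_nt == "C" (second-most-common), alt_freq == 3
assert (cov, alt_freq, alt_nt) == (14, 3, "C")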
def _is_segment_in_block_range(segment, blocks):
    """Return whether the segment is in the range of one of the blocks."""
    for block in blocks:
        if block.start <= segment.start and segment.end <= block.end:
            return True
    return False
e7509f18f0a72cf90fb1aa643c77c2e13154f0d0
3,603
def generate_episode(sim, policy, horizon=200):
    """
    Generate an episode from a policy acting on a simulation.

    Returns: sequence of state, action, reward.
    """
    obs = sim.reset()
    policy.reset()  # Reset the policy too so that it knows it's the beginning of the episode.
    states, actions, rewards = [], [], []
    states.append(obs)

    for _ in range(horizon):
        action = policy.act(obs)
        obs, reward, done, _ = sim.step(action)
        states.append(obs)
        actions.append(action)
        rewards.append(reward)
        if done:
            break

    states.pop()  # Pop off the terminating state

    return states, actions, rewards
73a0bbb2703c047d3305e93dd2a340c83db12277
3,605
def value_or_dash(value):
    """Converts the given value to a unicode dash if the value does
    not exist and does not equal 0."""
    if not value and value != 0:
        return u'\u2013'.encode('utf-8')
    return value
8cadbfd8dcfad9dfeb4112cb8537f0e0d5de49ba
3,615
import pkg_resources


def get_resource(name):
    """Convenience method for retrieving a package resource."""
    return pkg_resources.resource_stream(__name__, name)
63aada8f6e99956b770bd9ea7f737d90432c3f90
3,617
def error_message() -> str:
    """Error message for invalid input"""
    return 'Invalid input. Use !help for a list of commands.'
2ffea48dd495d464264bc657ca62cfe6043a1084
3,618
def how_many(aDict):
    """
    aDict: A dictionary, where all the values are lists.

    returns: int, how many values are in the dictionary.
    """
    return sum(len(value) for value in aDict.values())
ed1729b55411f29626dfe61c6853bc19813ceedc
3,624
def crop_keypoint_by_coords(keypoint, crop_coords, crop_height, crop_width, rows, cols):
    """Crop a keypoint using the provided coordinates of bottom-left and
    top-right corners in pixels and the required height and width of the crop.
    """
    x, y, a, s = keypoint
    x1, y1, x2, y2 = crop_coords
    cropped_keypoint = [x - x1, y - y1, a, s]
    return cropped_keypoint
5a2365a611275fea4d0f5d031127426c88c43905
3,625
def string_between(string, start, end):
    """
    Returns a new string between the start and end range.

    Args:
        string (str): the string to split.
        start (str): string to start the split at.
        end (str): string to stop the split at.

    Returns:
        new string between start and end.
    """
    try:
        return str(string).split(str(start), 1)[1].split(str(end))[0]
    except IndexError:
        return ""
fc6f2a3def4112140539c90abe6304f5daa8c1f4
3,626
def row_to_columns(row):
    """Takes a row as a string and returns it as a list of columns."""
    return [column for column in row.split() if column.strip() != '']
837477f2e9c160b93c339a9753e0598ac56c819e
3,639
def is_circular(linked_list):
    """
    Determine whether the Linked List is circular or not

    Args:
       linked_list(obj): Linked List to be checked
    Returns:
       bool: Return True if the linked list is circular, return False otherwise

    The way we'll do this is by having two pointers, called "runners", moving
    through the list at different rates. Typically we have a "slow" runner
    which moves at one node per step and a "fast" runner that moves at two
    nodes per step.

    If a loop exists in the list, the fast runner will eventually move behind
    the slow runner as it moves to the beginning of the loop. Eventually it
    will catch up to the slow runner and both runners will be pointing to the
    same node at the same time. If this happens then you know there is a loop
    in the linked list. Below is an example where we have a slow runner and a
    fast runner (the red arrow).
    """
    slow = linked_list.head
    fast = linked_list.head

    # as fast runner will reach end first if there is no loop so
    # adding a None check on just fast should be enough
    while fast and fast.next:
        slow = slow.next
        # move fast runner 2 times to make it fast as compared to slow runner
        fast = fast.next.next

        if fast == slow:
            return True

    # If we get to a node where fast doesn't have a next node or doesn't exist
    # itself, the list has an end and isn't circular
    return False
5a641df602f983de78c9c74b825847412aa54c21
3,645
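As a quick illustration of the two-runner check above, here is a minimal sketch using a hypothetical Node/LinkedList pair (these class names are assumed for the example and are not part of the dataset record):

# Minimal stand-ins for the linked-list types that is_circular expects.
class Node:
    def __init__(self, value):
        self.value = value
        self.next = None

class LinkedList:
    def __init__(self, head=None):
        self.head = head

a, b, c = Node(1), Node(2), Node(3)
a.next, b.next = b, c

print(is_circular(LinkedList(a)))  # False: 1 -> 2 -> 3 -> None

c.next = a  # close the loop: 1 -> 2 -> 3 -> 1
print(is_circular(LinkedList(a)))  # True: the fast runner meets the slow one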
def precisionatk_implementation(y_true, y_pred, k):
    """Function to calculate precision at k for a given sample

    Arguments:
        y_true {list} -- list of actual classes for the given sample
        y_pred {list} -- list of predicted classes for the given sample
        k {[int]} -- top k predictions we are interested in
    """
    # if k = 0 return 0 as we should never have k=0
    # as k is always >=1
    if k == 0:
        return 0

    # as we are interested in top k predictions
    y_pred = y_pred[:k]

    # convert predictions to set
    pred_set = set(y_pred)

    # convert actual values to set
    true_set = set(y_true)

    # find common values in both
    common_values = pred_set.intersection(true_set)

    # return length of common values over k
    return len(common_values) / len(y_pred[:k])
945caa95b32681939569ca675475e2527dbdee78
3,647
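A short usage sketch for the precision-at-k helper above (the labels are illustrative only):

y_true = [0, 1, 2]
y_pred = [2, 3, 0, 1]

print(precisionatk_implementation(y_true, y_pred, 2))  # 0.5  -> only class 2 is relevant in the top 2
print(precisionatk_implementation(y_true, y_pred, 4))  # 0.75 -> classes 2, 0 and 1 are relevant in the top 4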
def AskNumber(text="unknown task"):
    """
    Asks the user to interactively input a number (float or int) at any point
    in the script, and returns the input number.

    | __option__ | __description__
    | --- | ---
    | *text | an optional string to identify for what purpose the chosen number will be used.
    """
    def ValidateNumber(text):
        answer = input("\n\nwrite a comma or integer number to use for "
                       + str(text) + " (example: 15 or 15.83)\nnumber = ")
        # input() returns a string in Python 3, so parse it explicitly:
        # try an integer first, then fall back to a float.
        try:
            innumber = int(answer)
        except ValueError:
            try:
                innumber = float(answer)
            except ValueError:
                print("""\n---> error: the number must be either a floating point comma or integer number""")
                return ValidateNumber(text)
        return innumber

    return ValidateNumber(text)
41949d0a2e2d87b5cdb26d2db9bff9a64fbeeb1d
3,648
import itertools


def split_and_pad(s, sep, nsplit, pad=None):
    """ Splits string s on sep, up to nsplit times.

    Returns the results of the split, potentially padded with additional
    items (the pad value), up to a total of nsplit + 1 items.
    """
    l = s.split(sep, nsplit)
    return itertools.chain(l, itertools.repeat(pad, nsplit + 1 - len(l)))
6c439301df7109d9b01a06a87bd7d6adafb8ee1e
3,650
def _shape_from_resolution(resolution):
    """
    Calculate the shape of the global Earth relief grid given a resolution.

    Parameters
    ----------
    resolution : str
        Same as the input for load_earth_relief

    Returns
    -------
    shape : (nlat, nlon)
        The calculated shape.

    Examples
    --------

    >>> _shape_from_resolution('60m')
    (181, 361)
    >>> _shape_from_resolution('30m')
    (361, 721)
    >>> _shape_from_resolution('10m')
    (1081, 2161)

    """
    minutes = int(resolution[:2])
    nlat = 180 * 60 // minutes + 1
    nlon = 360 * 60 // minutes + 1
    return (nlat, nlon)
c726d599696cee2259bc450606e63480b0991451
3,652
def get_fuel_from(mass: int) -> int:
    """Gets fuel from mass.

    Args:
        mass (int): mass for the fuel

    Returns:
        int: fuel necessary for the mass
    """
    return mass // 3 - 2
37390c8cb9ba7e84c7b5c14841528d6c38f1589e
3,653
def getLines(filename):
    """Return list of lines from file"""
    with open(filename, 'r', errors='ignore') as ff:
        return ff.readlines()
36e515decaa3876eed3b5db8363fb81a5db89c84
3,658
def has_poor_grammar(token_strings):
    """
    Returns whether the output has an odd number of double quotes or if it
    does not have balanced parentheses.
    """
    has_open_left_parens = False
    quote_count = 0
    for token in token_strings:
        if token == '(':
            if has_open_left_parens:
                return True
            else:
                has_open_left_parens = True
        elif token == ')':
            if has_open_left_parens:
                has_open_left_parens = False
            else:
                return True
        elif token == '"':
            quote_count += 1
    return quote_count % 2 == 1 or has_open_left_parens
b35c6af0ec771ac22ff66d9ca875f5d916cb9489
3,669
def tick2dayfrac(tick, nbTicks):
    """Conversion tick -> day fraction."""
    return tick / nbTicks
50d01778f62203d37e733a6b328455d3ea10e239
3,673
def url_to_filename(base, url):
    """Return the filename to which the page is frozen.

    base -- path to the file
    url -- web app endpoint of the page
    """
    if url.endswith('/'):
        url = url + 'index.html'
    return base / url.lstrip('/')
35084e8b5978869bf317073c76bafc356a7d9046
3,676
def _msd_anom_3d(time, D_alpha, alpha):
    """3d anomalous diffusion function."""
    return 6.0 * D_alpha * time**alpha
e5204c52368202665e4dd4acd7d86096349c0d29
3,677
import json


def dump_into_json(filename, metrics):
    """Dump the metrics dictionary into a JSON file

    It will automatically dump the dictionary:
    metrics = {'duration': duration,
               'voltage_extremes': voltage_extremes,
               'num_beats': num_beats,
               'mean_hr_bpm': mean_hr_bpm,
               'beats': beats}
    into a JSON file with the file name as the data file name.

    :param filename: name of the file being read
    :param metrics: a dictionary containing duration, voltage extremes,
                    number of beats, beats per minute, and the time where
                    beats occur
    :returns:
        - successful_JSON - True if it has successfully created the JSON file
    """
    successful_JSON = False
    try:
        output_file = open(filename + '.json', 'w')
        json.dump(metrics, output_file)
        output_file.close()
        successful_JSON = True
    except TypeError:
        print("Unsuccessfully output JSON file")
    return successful_JSON
2e6effbcefe7cb3033c4c472cbee3850c00ae06b
3,683
def is_primitive(v):
    """
    Checks if v is of primitive type.
    """
    return isinstance(v, (int, float, bool, str))
d22607c0e2b93b82b1da6beb50de68668624dd71
3,687
def linkify_only_full_urls(attrs, new=False):
    """Linkify only full links, containing the scheme."""
    if not new:  # This is an existing <a> tag, leave it be.
        return attrs

    # If the original text doesn't contain the scheme, don't linkify.
    if not attrs['_text'].startswith(('http:', 'https:')):
        return None

    return attrs
89fcc7f3fc53353686260779ae8ddb4c0523c57b
3,688
import re


def replace_subject_with_object(sent, sub, obj):
    """Replace the subject with object and remove the original subject"""
    # re.IGNORECASE must be passed as flags; as a positional argument it
    # would be interpreted as the count parameter of re.sub.
    sent = re.sub(r'{}'.format(obj), r'', sent, flags=re.IGNORECASE)
    sent = re.sub(r'{}'.format(sub), r'{} '.format(obj), sent, flags=re.IGNORECASE)
    return re.sub(r'{\s{2,}', r' ', sent, flags=re.IGNORECASE)
1c7f8115968c4e4ef10dcc3b83f0f259433f5082
3,695
def judgement(seed_a, seed_b):
    """Return amount of times last 16 binary digits of generators match."""
    sample = 0
    count = 0
    while sample <= 40000000:
        new_a = seed_a * 16807 % 2147483647
        new_b = seed_b * 48271 % 2147483647
        bin_a = bin(new_a)
        bin_b = bin(new_b)
        last16_a = bin_a[-16:]
        last16_b = bin_b[-16:]
        if last16_a == last16_b:
            count += 1
        seed_a = new_a
        seed_b = new_b
        sample += 1
    return count
9d778909ba6b04e4ca3adbb542fce9ef89d7b2b7
3,698
def accession(data):
    """
    Get the accession for the given data.
    """
    return data["mgi_marker_accession_id"]
132dcbdd0712ae30ce7929e58c4bc8cdf73aacb2
3,699
import re


def load_mac_vendors():
    """ parses wireshark mac address db and returns dict of mac : vendor """
    entries = {}

    f = open('mac_vendors.db', 'r')
    for lines in f.readlines():
        entry = lines.split()

        # match on first column being first six bytes
        r = re.compile(r'^([0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2})$')
        if len(entry) > 0 and r.match(entry[0]):
            # lowercase as convention
            entries[entry[0].lower()] = entry[1]

    return entries
361e9c79de8b473c8757ae63384926d266b68bbf
3,701
def reorganize_data(texts):
    """
    Reorganize data to contain tuples of all signs combined and all trans combined

    :param texts: sentences in format of tuples of (sign, tran)
    :return: data reorganized
    """
    data = []

    for sentence in texts:
        signs = []
        trans = []
        for sign, tran in sentence:
            signs.append(sign)
            trans.append(tran)
        data.append((signs, trans))

    return data
27b4efd99bbf470a9f8f46ab3e34c93c606d0234
3,702
import mimetypes


def img_mime_type(img):
    """Returns image MIME type or ``None``.

    Parameters
    ----------
    img: `PIL.Image`
        PIL Image object.

    Returns
    -------
    mime_type : `str`
        MIME string like "image/jpg" or ``None``.
    """
    if img.format:
        ext = "." + img.format
        return mimetypes.types_map.get(ext.lower())
    return None
fe46af6e5c03a1ae80cb809c81ab358ac5c085fa
3,704
def filter_out_nones(data):
    """
    Filter out any falsey values from data.
    """
    return (l for l in data if l)
39eb0fb7aafe799246d231c5a7ad8a150ed4341e
3,707
def factor_size(value, factor):
    """
    Factors the given thumbnail size. Understands both absolute dimensions
    and percentages.
    """
    if type(value) is int:
        size = value * factor
        return str(size) if size else ''

    if value[-1] == '%':
        value = int(value[:-1])
        return '{0}%'.format(value * factor)

    size = int(value) * factor
    return str(size) if size else ''
41b061fb368d56ba18b52cd7a6a3322292671d83
3,709
import array


def ordinate(values, maxrange, levels):
    """Ordinate values given a maximum data range and number of levels

    Parameters:
    1. values: an array of continuous values to ordinate
    2. maxrange: the maximum data range. Values larger than this will be saturated.
    3. levels: the number of levels at which values are ordinated
    """
    quantizer = lambda dist, maxrange, levels: int(1.0 * max(1, dist - 1) * levels / maxrange) + 1
    # accept lists, tuples and array.array sequences; anything else is
    # treated as a single value
    if isinstance(values, (list, tuple, array.array)):
        ordinated = []
        for v in values:
            if v == 0:
                ordinated.append(v)
            else:
                ordinated.append(quantizer(v, maxrange, levels))
        return ordinated
    else:
        if values == 0:
            return values
        else:
            return quantizer(values, maxrange, levels)
4db4a26579d9208cd90ec630cf82e54a4a7ec3fe
3,713
import time


def time_as_int() -> int:
    """
    Syntactic sugar for

    >>> from time import time
    >>> int(time())
    """
    return int(time.time())
f7f6d037d156c09a01c0ff13f8b43418133ab1b0
3,724
def should_retry_http_code(status_code):
    """
    :param status_code: (int) http status code to check for retry eligibility
    :return: (bool) whether or not responses with the status_code should be retried
    """
    return status_code not in range(200, 500)
69acb5bd34b06e1ff1e29630ac93e60a3ccc835c
3,725
import re


def eq_portions(actual: str, expected: str):
    """
    Compare whether actual matches portions of expected. The portions to ignore are of two types:
    - ***: ignore anything in between the left and right portions, including empty
    - +++: ignore anything in between left and right, but non-empty

    :param actual: string to test
    :param expected: expected string, containing at least one of the two patterns
    :return: a list of the portions ignored; if empty, it means there is no match.

    >>> eq_portions('', '+++aaaaaa***ccccc+++eeeeeee+++')
    ()
    >>> eq_portions('_1__aaaaaa__2__ccccc_3__eeeeeee_4_', '+++aaaaaa***ccccc+++eeeeeee+++')
    ('_1__', '__2__', '_3__', '_4_')
    >>> eq_portions('_1__aaaaaaccccc_3__eeeeeee_4_', '+++aaaaaa***ccccc+++eeeeeee+++')
    ('_1__', '', '_3__', '_4_')
    >>> eq_portions('_1__aaaaaaccccc_3__eeeeeee', '+++aaaaaa***ccccc+++eeeeeee+++')
    ()
    >>> eq_portions('aaaaaaccccc_3__eeeeeee', '+++aaaaaa***ccccc+++eeeeeee')
    ()
    >>> eq_portions('aaaaaa_1__ccccc__2_eeeeeee', '***aaaaaa***ccccc+++eeeeeee***')
    ('', '_1__', '__2_', '')
    >>> eq_portions('aaaaaa___ccccc___eeeeeee', '***aaaaaa')
    ()
    >>> eq_portions('aaaaaa___ccccc___eeeeeee', 'aaaaaa')
    Traceback (most recent call last):
    ...
    ValueError: The 'expected' argument must contain at least one *** OR +++
    """
    re_expect = re.escape(expected)
    ANYTHING = re.escape('\\*' * 3)
    SOMETHING = re.escape('\\+' * 3)
    if not re.search(ANYTHING, re_expect) and not re.search(SOMETHING, re_expect):
        raise ValueError("The 'expected' argument must contain at least one *** OR +++")

    re_expect = re.sub(SOMETHING, '(.+)', re_expect)
    re_expect = re.sub(ANYTHING, '(.*)', re_expect)
    matches = re.fullmatch(re_expect, actual)
    if not matches:
        return ()

    return matches.groups()
704b2a83575347c5143c2dc0aca5227a8fc5bd4b
3,727
def is_comprehension(leaf):
    """
    Return true if the leaf is the beginning of a list/set/dict comprehension.
    Returns true for generators as well
    """
    if leaf.type != 'operator' or leaf.value not in {'[', '(', '{'}:
        return False

    sibling = leaf.get_next_sibling()

    return (sibling.type in {'testlist_comp', 'dictorsetmaker'}
            and sibling.children[-1].type == 'sync_comp_for')
11fff76ff8ed19b3d57359b56db886c003603a86
3,730
def make_exposure_shares(exposure_levels, geography="geo_nm", variable="rank"):
    """Aggregate shares of activity at different levels of exposure

    Args:
        exposure_levels (df): employment by lad and sector and exposure ranking
        geography (str): geography to aggregate over
        variable (str): variable we want to calculate shares over
    """
    exp_distr = (
        exposure_levels.groupby(["month_year", variable, geography])["value"]
        .sum()
        .reset_index(drop=False)
        .groupby([geography, "month_year"])
        .apply(lambda x: x.assign(share=lambda df: df["value"] / df["value"].sum()))
    ).reset_index(drop=True)

    return exp_distr
02d990f2b08e3acb2a2b8ac01e44848770bdea71
3,734