Columns: content (string, 39 to 14.9k chars), sha1 (string, 40 chars), id (int64, 0 to 710k)
def fexists(sftp, path):
    """os.path.exists for paramiko's SCP object"""
    try:
        sftp.stat(path)
    except IOError:
        return False
    else:
        return True
3cff765bbc8cc3f5ed3a3165473961ebfc04ec94
28,038
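A minimal usage sketch for fexists; the hostname and credentials below are placeholders for illustration, not from the original source:

import paramiko

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect("example.com", username="user", password="secret")  # placeholder host/credentials
sftp = client.open_sftp()
print(fexists(sftp, "/etc/hostname"))  # True if the remote path exists
client.close()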
import random


def random_guesser_v1(passage):
    """Takes in a string and returns a dictionary with 6 keys for the
    binary values representing features of the string, and another six
    keys representing the scores of those features.

    Note: for the scores, 0 represents no score, while -1 represents '?'

    Arguments
    ---------
    passage: string to be converted to dictionary with feature scores.
    """
    features_dict = {}
    features = ["is_accountability", "is_unobjectivity", "is_inaccuracy",
                "is_fact-basedness", "is_influential", "is_opinionated"]
    features_score = ["score_accountability", "score_unobjectivity",
                      "score_inaccuracy", "score_fact-basedness",
                      "score_influential", "score_opinionated"]
    for i in range(len(features)):
        features_dict[features[i]] = random.randrange(2)
        features_dict[features_score[i]] = random.randrange(-1, 4)
    return features_dict
b2759839fcdd59d36aa2bf6643750970affc77a1
28,042
def most_similar(train, test, distances):
    """Get the most similar program name.

    Parameters
    ----------
    train: list
        a list of strings containing names of training programs
    test: list
        a list of strings containing names of test programs
    distances: matrix
        matrix of distances where distances[i][j] is the distance of
        train[i] to test[j]

    Return
    ------
    a dict bench_list where bench_list[test[j]] is the name of the
    closest program from train to test[j]
    """
    bench_list = {}
    for j in range(len(test)):
        bench = train[0]
        dist = distances[0][j]
        for i in range(len(train)):
            if distances[i][j] < dist:
                bench = train[i]
                dist = distances[i][j]
        bench_list[test[j]] = bench
    return bench_list
324722574bbbdbda61e7e4bc65669c2ce9674630
28,045
def decimal_hours(timeobject, rise_or_set: str) -> float:
    """
    Parameters
    ----------
    timeobject : datetime object
        Sunrise or -set time
    rise_or_set: string
        'sunrise' or 'sunset', specifying which of the two timeobject is

    Returns
    -------
    float
        time of timeobject in decimal hours
    """
    assert rise_or_set == "sunrise" or rise_or_set == "sunset"
    if timeobject:
        return timeobject.hour + timeobject.minute / 60
    elif rise_or_set == "sunrise":
        return 0.0
    else:
        return 23.999
44fe260abf8751cb78cf6e484dbf223d05233713
28,046
def sqrt(pop):
    """
    Returns square root of length of list

    :param pop: List
    :return: Square root of size of list
    """
    return len(pop) ** 0.5
877ae17cbe2cdd3a5f5b2cb03fc8d0b7af48c916
28,052
import re


def is_hex_color(color: str) -> bool:
    """
    Checks if a given color string is a valid hex color

    :param color:
    :return:
    """
    match = re.search(r"^#(?:[0-9a-fA-F]{3}){1,2}$", color)
    if not match:
        return False
    return True
a86632883ccc05bc393b1b310c0fdf2eff559e55
28,057
import logging


def get_logger(name):
    """
    Retrieves a logger.

    :param name: The name of the logger
    :returns: The requested logger
    :rtype: logging.Logger
    """
    log = logging.getLogger(name)
    log.setLevel(logging.ERROR)
    return log
45d36a78d1a076123a93b3460774056685befc1e
28,058
def get_cls_db(db_name):
    """Get benchmark dataset for classification"""
    if db_name.lower() == 'cls_davis':
        return './dataset/classification/DAVIS'
    elif db_name.lower() == 'cls_biosnap':
        return './dataset/classification/BIOSNAP/full_data'
    elif db_name.lower() == 'cls_bindingdb':
        return './dataset/classification/BindingDB'
89cf36b94299e4a4e1b2a4880ae1e891e37e77ac
28,061
import pathlib


def get_file_size(file_path):
    """Returns file size"""
    file = pathlib.Path(file_path)
    return file.stat().st_size
aa90923cd117b2b96a76c8d29d6218e3a577d5df
28,069
import math


def deRuiter_radius(src1, src2):
    """Calculates the De Ruiter radius for two sources"""
    # The errors are the square root of the quadratic sum of
    # the systematic and fitted errors.
    src1_ew_uncertainty = math.sqrt(src1.ew_sys_err**2 + src1.error_radius**2) / 3600.
    src1_ns_uncertainty = math.sqrt(src1.ns_sys_err**2 + src1.error_radius**2) / 3600.
    src2_ew_uncertainty = math.sqrt(src2.ew_sys_err**2 + src2.error_radius**2) / 3600.
    src2_ns_uncertainty = math.sqrt(src2.ns_sys_err**2 + src2.error_radius**2) / 3600.

    ra_nom = ((src1.ra - src2.ra) * math.cos(math.radians(0.5 * (src1.dec + src2.dec))))**2
    ra_denom = src1_ew_uncertainty**2 + src2_ew_uncertainty**2
    ra_fac = ra_nom / ra_denom

    dec_nom = (src1.dec - src2.dec)**2
    dec_denom = src1_ns_uncertainty**2 + src2_ns_uncertainty**2
    dec_fac = dec_nom / dec_denom

    dr = math.sqrt(ra_fac + dec_fac)
    return dr
55c7f174b61c249427f09cec3c885c049f1d38dc
28,075
def fix_data(text):
    """Add BOS and EOS markers to sentence."""
    if "<s>" in text and "</s>" in text:
        # This hopes that the text has been correctly pre-processed
        return text
    sentences = text.split("\n")
    # Remove any blank sentences
    sentences = ["<s> " + s + " </s>" for s in sentences if len(s.strip()) > 0]
    return " ".join(sentences)
b502784e9e8fa8875030730595dcaaae66e2f31b
28,083
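For illustration, a small example of what fix_data produces on a two-sentence input:

text = "the cat sat\nthe dog ran"
print(fix_data(text))
# <s> the cat sat </s> <s> the dog ran </s>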
import base64


def b64_encode(value: bytes) -> bytes:
    """
    URL safe base 64 encoding of a value

    :param value: bytes
    :return: bytes
    """
    return base64.urlsafe_b64encode(value).strip(b"=")
40a7dfbec7ec390a71cdacc5ab54ce8e2a092754
28,084
def comment_scalar(a_dict, key):
    """Comment out a scalar in a ConfigObj object.

    Convert an entry into a comment, sticking it at the beginning of the section.

    Returns: 0 if nothing was done.
             1 if the ConfigObj object was changed.
    """
    # If the key is not in the list of scalars there is no need to do anything.
    if key not in a_dict.scalars:
        return 0

    # Save the old comments
    comment = a_dict.comments[key]
    inline_comment = a_dict.inline_comments[key]
    if inline_comment is None:
        inline_comment = ''
    # Build a new inline comment holding the key and value, as well as the old inline comment
    new_inline_comment = "%s = %s %s" % (key, a_dict[key], inline_comment)

    # Delete the old key
    del a_dict[key]

    # If that was the only key, there's no place to put the comments. Do nothing.
    if len(a_dict.scalars):
        # Otherwise, put the comments before the first entry
        first_key = a_dict.scalars[0]
        a_dict.comments[first_key] += comment
        a_dict.comments[first_key].append(new_inline_comment)

    return 1
f2121caa4e58ec88527ae128a0ac9669efa066d7
28,089
from datetime import datetime


def xml2date(s):
    """Convert XML time string to python datetime object"""
    return datetime.strptime(s[:22] + s[23:], '%Y-%m-%dT%H:%M:%S%z')
762480533d8e64544b4b4c4c67093098dcfebb56
28,091
def atom2dict(atom, dictionary=None):
    """Get a dictionary of one of a structure's
    :class:`diffpy.structure.Structure.atoms` content.

    Only values necessary to initialize an atom object are returned.

    Parameters
    ----------
    atom : diffpy.structure.Structure.atom
        Atom in a structure.
    dictionary : dict, optional
        Dictionary to update with structure atom information. If None
        (default), a new dictionary is created.

    Returns
    -------
    dictionary : dict
        Dictionary with structure atoms information.
    """
    if dictionary is None:
        dictionary = {}
    dictionary.update(
        {
            attribute: atom.__getattribute__(attribute)
            for attribute in ["element", "label", "occupancy", "xyz", "U"]
        }
    )
    return dictionary
6873c64bd39d211a43f302375d6a6c76e3bd0b7e
28,103
def get_connectivity(input_nodes):
    """Create a description of the connections of each node in the graph.

    Recurrent connections (i.e. connections of a node with itself) are excluded.

    Args:
        input_nodes (:obj:`list` of :obj:`Node`): the input operations of the model.

    Returns:
        graph (:obj:`dict` of :obj:`dict` of :obj:`set`): a description of the
            graph's connectivity in terms of inbound-outbound nodes of each node.
    """
    graph = dict()
    nodes = input_nodes.copy()

    while len(nodes) != 0:
        # select a node
        current_node = nodes.pop(0)
        # if no information has been collected yet, set up dict entry
        if current_node not in graph:
            graph[current_node] = {'inbound': set(), 'outbound': set()}
        # scroll through current node's outbound nodes
        for node in current_node.outbound_nodes:
            # skip recurrent connections (for RNN cells)
            if node == current_node:
                continue
            # if no information has been collected yet, set up dict entry
            if node not in graph:
                nodes.append(node)
                graph[node] = {'inbound': set(), 'outbound': set()}
            # add reciprocal connectivity information
            graph[current_node]['outbound'].add(node)
            graph[node]['inbound'].add(current_node)
    return graph
ea14ff70f4c821744079219a2ec3ee50acc0483b
28,104
def get_message_with_context(msg: str, context: str) -> str:
    """
    Concatenates an error message with a context.
    If context is the empty string, will only return the error message.

    :param msg: the message
    :param context: the context of the message
    :return: the message with context
    """
    if len(context) == 0:
        return msg
    else:
        msg = "\t" + "\n\t".join(msg.splitlines())
        return "%s\n%s" % (context, msg)
8d625b297ba4510fdef3476138bafd1e210fcaa6
28,105
def unique(list1, list2):
    """Get the unique items that are in the first list but not in the
    second list.

    NOTE: unique(l1, l2) is not always equal to unique(l2, l1)

    Args:
        list1 (list): A list of elements.
        list2 (list): A list of elements.

    Returns:
        list: A list with the unique elements.

    Examples:
        >>> unique([1,2,3], [2,3,4])
        [1]
        >>> unique([2,3,4], [1,2,3])
        [4]
    """
    return list(set(list1) - set(list2))
9e870319287ef7296cb5f54dc1089dc676ecc553
28,109
def get_classes(module, superclass=None):
    """
    Return a list of new-style classes defined in *module*, excluding
    _private and __magic__ names, and optionally filtering only those
    inheriting from *superclass*. Note that both arguments are actual
    modules, not names.

    This method only returns classes that were defined in *module*. Those
    imported from elsewhere are ignored.
    """
    objects = [
        getattr(module, name)
        for name in dir(module)
        if not name.startswith("_")]

    # filter out everything that isn't a new-style
    # class, or wasn't defined in *module* (ie, it
    # is imported from somewhere else)
    classes = [
        obj for obj in objects
        if isinstance(obj, type)
        and (obj.__module__ == module.__name__)]

    # if a superclass was given, filter the classes
    # again to remove those that aren't its subclass
    if superclass is not None:
        classes = [
            cls for cls in classes
            if issubclass(cls, superclass)]

    return classes
9b31c7179a29b148e8e7503fa8bc06282e1248b4
28,112
from hashlib import md5 as _md5


def get_filesize_and_checksum(filename):
    """Opens the file with the passed filename and calculates its size
    and md5 hash

    Args:
        filename (str): filename to calculate size and checksum for

    Returns:
        tuple (int, str): size of data and its md5 hash
    """
    md5 = _md5()
    size = 0
    with open(filename, "rb") as f:
        for chunk in iter(lambda: f.read(4096), b""):
            md5.update(chunk)
            size += len(chunk)
    return (size, str(md5.hexdigest()))
b8e1c0dc6bcb9785d2c6f47dbebe6ccedf18b1af
28,113
def exception_str(exc):
    """Call this to get the exception string associated with the given
    exception or tuple of the form (exc, traceback) or
    (exc_class, exc, traceback).
    """
    if isinstance(exc, tuple) and len(exc) == 3:
        return str(exc[1])
    return str(exc)
eb3729cc49a5e346fbd9c064f2fe0fc4ef4bd2c2
28,129
def xidz(numerator, denominator, value_if_denom_is_zero):
    """
    Implements Vensim's XIDZ function.
    This function executes a division, robust to the denominator being zero.
    In the case of a zero denominator, the final argument is returned.

    Parameters
    ----------
    numerator: float
    denominator: float
        Components of the division operation
    value_if_denom_is_zero: float
        The value to return if the denominator is zero

    Returns
    -------
    numerator / denominator if abs(denominator) >= 1e-6
    otherwise, returns value_if_denom_is_zero
    """
    small = 1e-6  # What is considered zero according to Vensim Help
    if abs(denominator) < small:
        return value_if_denom_is_zero
    else:
        return numerator * 1.0 / denominator
45782f957c56a0f91528d6d945c5d7887fd68e95
28,131
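A short sketch showing the two branches of xidz:

print(xidz(10, 2, 0))  # 5.0  (normal division)
print(xidz(10, 0, 0))  # 0    (denominator treated as zero)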
def mock_exists(file_map, fn):
    """mock os.path.exists()"""
    return (fn in file_map)
f68741c4bd4da3e5f4d32dce1ef9c249495c8f5a
28,132
def linear_function(x, m, b):
    """
    Get the y value of a linear function given the x value.

    Args:
        x (float): the x value
        m (float): the slope
        b (float): the intercept

    Returns:
        The expected y value from a linear function at some specified x value.
    """
    return m * x + b
929882eb3f3e4b0767458c63ed17ca67fc6ab17f
28,133
def pp_timestamp(t):
    """
    Get a friendly timestamp represented as a string.
    """
    if t is None:
        return ''
    h, m, s = int(t / 3600), int(t / 60 % 60), t % 60
    return "%02d:%02d:%05.2f" % (h, m, s)
d1766cabcff9f09c145d98536900e9a82e734f63
28,139
def remove_hook_words(text, hook_words):
    """Removes hook words from text, along with the one word that follows each.

    For text = "a b c d e f" and hook_words = ['b', 'e']
    returns "a d" (without b, e and the words that follow them)
    """
    words = text.split()
    answer = []
    k = 0
    while k < len(words):
        if words[k] in hook_words:
            k += 2
        else:
            answer.append(words[k])
            k += 1
    return ' '.join(answer)
aaa1706fe7d9322d900ef346f3189824d06f8df2
28,142
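The example from the docstring, run directly:

print(remove_hook_words("a b c d e f", ["b", "e"]))  # a d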
def is_u2f_enabled(user):
    """
    Determine if a user has U2F enabled
    """
    return user.u2f_keys.all().exists()
39b3b93e03eee230fc978ad5ec64ba4965b2d809
28,149
def extend_feature_columns(feature_columns):
    """Use to define additional feature columns, such as bucketized_column
    and crossed_column. Default behaviour is to return the original
    feature_columns list as-is.

    Args:
        feature_columns: [tf.feature_column] - list of base feature_columns
            to be extended

    Returns:
        [tf.feature_column]: extended feature_column list
    """
    # examples - given:
    # 'x' and 'y' are two numeric features:
    # 'alpha' and 'beta' are two categorical features
    #
    # feature_columns['alpha_X_beta'] = tf.feature_column.crossed_column(
    #     [feature_columns['alpha'], feature_columns['beta']], int(1e4))
    #
    # num_buckets = parameters.HYPER_PARAMS.num_buckets
    # buckets = np.linspace(-2, 2, num_buckets).tolist()
    #
    # feature_columns['x_bucketized'] = tf.feature_column.bucketized_column(
    #     feature_columns['x'], buckets)
    #
    # feature_columns['y_bucketized'] = tf.feature_column.bucketized_column(
    #     feature_columns['y'], buckets)
    #
    # feature_columns['x_bucketized_X_y_bucketized'] = tf.feature_column.crossed_column(
    #     [feature_columns['x_bucketized'], feature_columns['y_bucketized']], int(1e4))

    return feature_columns
11d0c77331745719d445f7926f041e2fe1c70903
28,150
def clean_data(players):
    """Sanitize the list of player data."""
    cleaned_players = []
    for player in players:
        cleaned_player = {}
        for key, value in player.items():
            cleaned_value = value
            if key == "height":
                cleaned_value = int(value[0:2])
            elif key == "experience":
                cleaned_value = value == "YES"
            elif key == "guardians":
                cleaned_value = value.split(" and ")
            cleaned_player[key] = cleaned_value
        cleaned_players.append(cleaned_player)
    return cleaned_players
88836eb154ff1a3cb32c567634adf813ac234de6
28,153
import shutil


def exe_exists(exe):
    """
    Returns the full path if the executable exists and is on the PATH.
    None otherwise.
    """
    return shutil.which(exe)
38bed97a3d195e6adc8e0dba936d213828f15e2f
28,156
def confusion_stats(set_true, set_test):
    """
    Count the true positives, false positives and false negatives in a
    test set with respect to a "true" set. True negatives are not counted.
    """
    true_pos = len(set_true.intersection(set_test))
    false_pos = len(set_test.difference(set_true))
    false_neg = len(set_true.difference(set_test))
    return true_pos, false_pos, false_neg
401b216b1317f16a424830e71b48f1d21d603956
28,159
def category_parents(category):
    """
    Get list of parents of a category.

    :param category: Object category, product. e.g <8c8fff64-8886-4688-9a90-24f0d2d918f9>
    :return: List of categories and sub-categories. e.g.
        [
            {
                "id": "c0136516-ff72-441a-9835-1ecb37357c41",
                "name": "Sombreros y Gorros"
            },
            {
                "id": "ff2cbe7e-a817-41d5-9363-6175bb757505",
                "name": "Accesorios"
            },
            {
                "id": "7b68e61c-516a-45b4-8eda-654f3af39e03",
                "name": "ROPA MUJER"
            }
        ]
    """
    if not category.parent_category:
        return []

    parent_list = []
    while category.parent_category:
        parent_list.append({
            'id': str(category.id),
            'name': category.name,
        })
        category = category.parent_category
    parent_list.append({
        'id': str(category.id),
        'name': category.name,
    })
    return parent_list
84616c76fb4179eb2f91600699e1f07d0de9b15f
28,162
import collections
from typing import Any, Dict, Iterable, List


def _list_dict_to_dict_list(samples: Iterable[Dict[Any, Any]]) -> Dict[Any, List[Any]]:
    """Convert a list of dictionaries to a dictionary of lists.

    Args:
        samples: a list of dictionaries

    Returns:
        a dictionary of lists

    .. versionadded:: 0.2
    """
    collated = collections.defaultdict(list)
    for sample in samples:
        for key, value in sample.items():
            collated[key].append(value)
    return collated
a5bd1dea71306f5e8151f6dfcc94b96f328b6d76
28,165
def count_rule_conditions(rule_string: str) -> int:
    """
    Counts the number of conditions in a rule string.

    Parameters
    ----------
    rule_string : str
        The standard Iguanas string representation of the rule.

    Returns
    -------
    int
        Number of conditions in the rule.
    """
    n_conditions = rule_string.count("X['")
    return n_conditions
4dc8bc3fdc7ee4d4302101a39b7849bcd7dff6e8
28,166
def check_length_of_shape_or_intercept_names(name_list,
                                             num_alts,
                                             constrained_param,
                                             list_title):
    """
    Ensures that the length of the parameter names matches the number of
    parameters that will be estimated. Will raise a ValueError otherwise.

    Parameters
    ----------
    name_list : list of strings.
        Each element should be the name of a parameter that is to be
        estimated.
    num_alts : int.
        Should be the total number of alternatives in the universal choice
        set for this dataset.
    constrained_param : {0, 1, True, False}
        Indicates whether (1 or True) or not (0 or False) one of the type of
        parameters being estimated will be constrained. For instance,
        constraining one of the intercepts.
    list_title : str.
        Should specify the type of parameters whose names are being checked.
        Examples include 'intercept_params' or 'shape_params'.

    Returns
    -------
    None.
    """
    if len(name_list) != (num_alts - constrained_param):
        msg_1 = "{} is of the wrong length:".format(list_title)
        msg_2 = "len({}) == {}".format(list_title, len(name_list))
        correct_length = num_alts - constrained_param
        msg_3 = "The correct length is: {}".format(correct_length)
        total_msg = "\n".join([msg_1, msg_2, msg_3])
        raise ValueError(total_msg)
    return None
d83ed7d6989c7e3ccdbbb256eaa72759a7f242d3
28,167
def get_token_object(auth):
    """
    Retrieve the object or instance from a token creation. Used for knox support.

    :param auth: The instance or tuple returned by the token's .create()
    :type auth: tuple | rest_framework.authtoken.models.Token
    :return: The instance or object of the token
    :rtype: rest_framework.authtoken.models.Token | knox.models.AuthToken
    """
    return auth[0] if isinstance(auth, tuple) else auth
3651951e413f3fd44f159ed6070542d54f2923b2
28,169
def format_interconnector_loss_demand_coefficient(LOSSFACTORMODEL):
    """Re-formats the AEMO MSS table LOSSFACTORMODEL to be compatible with
    the Spot market class.

    Examples
    --------
    >>> import pandas as pd
    >>> LOSSFACTORMODEL = pd.DataFrame({
    ...     'INTERCONNECTORID': ['X', 'X', 'X', 'Y', 'Y'],
    ...     'REGIONID': ['A', 'B', 'C', 'C', 'D'],
    ...     'DEMANDCOEFFICIENT': [0.001, 0.003, 0.005, 0.0001, 0.002]})
    >>> demand_coefficients = format_interconnector_loss_demand_coefficient(LOSSFACTORMODEL)
    >>> print(demand_coefficients)
      interconnector region  demand_coefficient
    0              X      A              0.0010
    1              X      B              0.0030
    2              X      C              0.0050
    3              Y      C              0.0001
    4              Y      D              0.0020

    Parameters
    ----------
    LOSSFACTORMODEL : pd.DataFrame

        =================  ========================================================================================
        Columns:           Description:
        INTERCONNECTORID   unique identifier of an interconnector (as `str`)
        REGIONID           unique identifier of a market region (as `str`)
        DEMANDCOEFFICIENT  the coefficient of the regional demand variable in the loss factor equation (as `np.float64`)
        =================  ========================================================================================

    Returns
    -------
    demand_coefficients : pd.DataFrame

        ==================  =======================================================================================
        Columns:            Description:
        interconnector      unique identifier of an interconnector (as `str`)
        region              the market region whose demand the coefficient applies to (as `str`)
        demand_coefficient  the coefficient of the regional demand variable in the loss factor equation (as `np.float64`)
        ==================  =======================================================================================

    """
    demand_coefficients = LOSSFACTORMODEL.loc[:, ['INTERCONNECTORID', 'REGIONID', 'DEMANDCOEFFICIENT']]
    demand_coefficients.columns = ['interconnector', 'region', 'demand_coefficient']
    return demand_coefficients
a7a87543eedb33248f6532ec234d47b7fe5455b3
28,170
from datetime import datetime


def to_time_string(value):
    """
    Gets the time string representation of the input datetime with utc offset.
    For example: `23:40:15`

    :param datetime | time value: input object to be converted.

    :rtype: str
    """
    time = value
    if isinstance(value, datetime):
        time = value.timetz()
    return time.isoformat(timespec='seconds')
b81ffff8b4ab626e0094bcfeb0bf6916de89d344
28,172
def u(string: str) -> bytes:
    """Shortcut to encode string to bytes."""
    return string.encode('utf8')
3310da2d9f24be94c0426128ac19db2481dd2c2d
28,173
def dictToTuple(heading, d):
    """Convert dict into an ordered tuple of values, ordered by the heading"""
    return tuple([d[attr] for attr in heading])
1a76201da4e5348a4c4ba9607992ad41a1c87163
28,179
def twoballs_filename(data_dir, num_examples, num_feat, num_noise_feat, frac_flip):
    """Generate filenames to save data and permutations"""
    data_filename = data_dir + '/twoballs_n=%d_%d:%d_rcn=%1.1f.csv'\
        % (num_examples, num_feat, num_noise_feat, frac_flip)
    perm_filename = data_dir + '/twoballs_n=%d_%d:%d_rcn=%1.1f_perm.txt'\
        % (num_examples, num_feat, num_noise_feat, frac_flip)
    return (data_filename, perm_filename)
8b965192527088017ca5544894c1257e22222caf
28,182
def getcompanyrow(values):
    """
    :param values:
    :return: list of values representing a row in the company table
    """
    companyrow = []
    companyrow.append(values['_COMPANYNUMBER_'])
    companyrow.append(values['_COMPANYNAME_'])
    companyrow.append(values['_WEBADDRESS_'])
    companyrow.append(values['_STREETADDRESS1_'])
    companyrow.append(values['_STREETADDRESS2_'])
    companyrow.append(values['_CITY_'])
    companyrow.append(values['_STATE_'])
    companyrow.append(values['_ZIPCODE_'])
    companyrow.append(values['_NOTES_'])
    companyrow.append(values['_PHONE_'])
    return companyrow
ea7b96c13797cf9aeeaa6da4d25e9144e2fc4524
28,184
def _get_deconv_pad_outpad(deconv_kernel):
    """Get padding and out padding for deconv layers."""
    if deconv_kernel == 4:
        padding = 1
        output_padding = 0
    elif deconv_kernel == 3:
        padding = 1
        output_padding = 1
    elif deconv_kernel == 2:
        padding = 0
        output_padding = 0
    else:
        raise ValueError(f"Not supported num_kernels ({deconv_kernel}).")

    return deconv_kernel, padding, output_padding
3c4a161e2d67bdb81d7e60a5e65ce232a4b0d038
28,186
import shutil

import requests


def download_file(url):
    """
    Download file at given URL.

    - url: url of file to be downloaded
    - return: downloaded file name
    """
    filename = url.split('/')[-1]
    r = requests.get(url, stream=True)
    with open(filename, 'wb') as f:
        shutil.copyfileobj(r.raw, f)
    return filename
b153f84f6f04299e460e19b9703d2ebd30804144
28,187
def recip_to_duration(recip):
    """Convert a humdrum recip string to a wholenote duration."""
    # Breves are indicated by zero.
    if recip[0] == '0':
        duration = 2
    else:
        duration = float(recip.rstrip('.')) ** -1
    dots = recip.count('.')
    return (2 * duration) - duration * (2.0 ** (-1 * dots))
200e86809488ab90df2e9dc0dde8bf7260804bc8
28,192
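A few worked values for recip_to_duration; '4' is a quarter note and a trailing dot adds half the value again:

print(recip_to_duration("4"))   # 0.25   (quarter note)
print(recip_to_duration("4.")) # 0.375  (dotted quarter)
print(recip_to_duration("0"))   # 2.0    (breve)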
import random


def weighted_sampler(pop_dict, k=1):
    """Randomly sample a dictionary's keys based on weights stored as values.

    Example:
        m = {'a': 3, 'b': 2, 'c': 5}
        samps = weighted_sampler(m, k=1000)
        # samps should be a ~ 300, b ~ 200, and c ~ 500
        >>> samps.count('a')
        304
        >>> samps.count('b')
        211
        >>> samps.count('c')
        485

    Of course, being a random sampler, your results will vary.
    """
    vals = list(pop_dict.keys())
    weights = [pop_dict[i] for i in vals]
    return random.choices(vals, weights=weights, k=k)
421cd16931a4b6695c8800cbc140aa86b9ce413a
28,196
def points_to_vector(point1, point2):
    """
    Return vector from point1 to point2
    """
    return point2 - point1
b8333afc6ecf6dbc8e1a9571b64d195ba896a73e
28,198
import torch


def vector_to_index(vector, all_zeros=-1):
    """
    Converts a binary vector to a list of indices corresponding to the
    locations where the vector was one.
    """
    l = len(vector)
    # i+1 so that the zeroth element and the zeros vector below don't conflict
    integers = torch.Tensor([i + 1 for i in range(l)])
    zeros = torch.zeros(l)
    # replace an element with its index+1, otherwise zero
    indices = torch.where(vector == 1, integers, zeros)
    # Remove all zero elements, and then correct for the i+1 from before
    flattened_indices = indices.nonzero() - 1
    if len(flattened_indices) == 0:
        return torch.Tensor([[all_zeros]])
    else:
        return flattened_indices
ba052dc3ed81e188249ad5e0192d864675412807
28,201
def enforce_use_of_all_cpus(model):
    """For sklearn models which have an `n_jobs` attribute, set it to -1.
    This will force all cores on the machine to be used.

    Args:
        model : sklearn model
            A trainable sklearn model

    Returns:
        model : sklearn model
            Model 'as is' with `n_jobs` set to -1 if it exists
    """
    setattr(model, 'n_jobs', -1)
    return model
6fb7878700ffc2fea960432ed76f6d1d90638a32
28,203
import io
from contextlib import redirect_stdout


def capture_stdout(func, *args, **kwargs):
    """Capture standard output to a string buffer"""
    stdout_string = io.StringIO()
    with redirect_stdout(stdout_string):
        func(*args, **kwargs)
    return stdout_string.getvalue()
4b9f4ed54644a28850d0f68e2dda1f484fa9644c
28,205
import torch


def l2_dist_close_reward_fn(achieved_goal, goal, threshold=.05):
    """Giving -1/0 reward based on how close the achieved state is to the
    goal state.

    Args:
        achieved_goal (Tensor): achieved state, of shape
            ``[batch_size, batch_length, ...]``
        goal (Tensor): goal state, of shape ``[batch_size, batch_length, ...]``
        threshold (float): L2 distance threshold for the reward.

    Returns:
        Tensor for -1/0 reward of shape ``[batch_size, batch_length]``.
    """
    if goal.dim() == 2:  # when goals are 1-dimensional
        assert achieved_goal.dim() == goal.dim()
        achieved_goal = achieved_goal.unsqueeze(2)
        goal = goal.unsqueeze(2)
    return -(torch.norm(achieved_goal - goal, dim=2) >= threshold).to(
        torch.float32)
ca65cb272f13a6caa5f88c75d4129bf15dc3c22d
28,208
def update_kvk(kvk):
    """
    Function to update outdated KvK-numbers

    :param kvk: the original KvK-number
    :return: the updated KvK-number, if it was updated
    """
    # Make sure KvK-number is a string
    kvk = str(kvk)

    # Add zero to beginning of outdated KvK-number and return it
    if len(kvk) == 7:
        new_kvk = '0' + kvk
        return new_kvk
    # If KvK-number is up to date, just return it
    else:
        return kvk
61b1d490de866786330a698e0370a360856c14a9
28,209
from typing import Union


def check_positive_int(input_int: Union[str, int]) -> int:
    """Check if `input_int` is a positive integer.

    If it is, return it as an `int`. Raise `ValueError` otherwise.
    """
    input_int = int(input_int)
    if input_int <= 0:
        raise ValueError(f"A positive integer is expected, got {input_int}")
    return input_int
686b062a3df929541a708fca0df10d3ae5a09088
28,210
def has_perm(user, perm):
    """Return True if the user has the given permission, False otherwise."""
    return user.has_perm(perm)
01f3395f45c5ef0274b4b68fc557fa5f8e7b9466
28,211
def format_date_string(date_str):
    """
    :param date_str: expects the format Thu Feb 28 14:51:59 +0000 2019
    :return: a dictionary containing day, month, hour, min
    """
    # month_val maps month abbreviations to month numbers
    month_val = {
        'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6,
        'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12
    }

    time = date_str.split(" ")[3]
    time = time.split(":")

    date = date_str.split(" ")
    # date[1] is the month and date[2] is the day of the month
    date = [date[1], date[2]]

    day = int(date[1])
    month = month_val[date[0]]
    hour = int(time[0])
    minutes = int(time[1])

    time = {}
    time['day'] = day
    time['month'] = month
    time['hour'] = hour
    time['min'] = minutes

    return time
dd77f0b87c84c0e1be57fa40c1b6bc2fdda0ad75
28,215
def get_oidc_auth(token=None):
    """
    returns HTTP headers containing OIDC bearer token
    """
    return {'Authorization': token}
a951cfdf83c5a0def3128ce409ced5db4fa8d3b6
28,218
def bytes_from_hex(hexcode):
    """
    Given a valid string of whitespace-delimited hexadecimal numbers,
    returns those hex numbers translated into byte string form.
    """
    return bytes(int(code, 16) for code in hexcode.split())
fbb800f0ea7f1327b42965fab48b740fad251027
28,225
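A quick check of bytes_from_hex on the hex spelling of 'Hello':

print(bytes_from_hex("48 65 6c 6c 6f"))  # b'Hello'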
def validate_split_durations(train_dur, val_dur, test_dur, dataset_dur):
    """Helper function to validate durations specified for splits,
    so other functions can do the actual splitting.

    First the function checks for invalid conditions:
        + If train_dur, val_dur, and test_dur are all None, a ValueError is raised.
        + If any of train_dur, val_dur, or test_dur have a negative value that is
          not -1, a ValueError is raised. -1 is interpreted differently as
          explained below.
        + If only val_dur is specified, this raises a ValueError; it is not clear
          what the durations of the training and test sets should be.

    Then, if either train_dur or test_dur is None, it is set to 0. None means
    the user did not specify a value.

    Finally the function validates that the sum of the specified split durations
    is not greater than the total duration of the dataset, `dataset_dur`.

    If any split is specified as -1, this value is interpreted as "first get the
    split for the set with a value specified, then use the remainder of the
    dataset in the split whose duration is set to -1". Functions that do the
    splitting have to "know" about this meaning of -1, so this validation
    function does not modify the value.

    Parameters
    ----------
    train_dur : int, float
        Target duration for training set split, in seconds.
    val_dur : int, float
        Target duration for validation set, in seconds.
    test_dur : int, float
        Target duration for test set, in seconds.
    dataset_dur : int, float
        Total duration of dataset of vocalizations that will be split.

    Returns
    -------
    train_dur, val_dur, test_dur : int, float
    """
    if val_dur and (train_dur is None and test_dur is None):
        raise ValueError(
            "cannot specify only val_dur, unclear how to split dataset into training and test sets"
        )

    # make a dict so we can refer to variable by name in loop
    split_durs = {
        "train": train_dur,
        "val": val_dur,
        "test": test_dur,
    }
    if all([dur is None for dur in split_durs.values()]):
        raise ValueError(
            "train_dur, val_dur, and test_dur were all None; must specify at least train_dur or test_dur"
        )

    if not all(
        [dur >= 0 or dur == -1 for dur in split_durs.values() if dur is not None]
    ):
        raise ValueError(
            "all durations for split must be real non-negative number or "
            "set to -1 (meaning 'use the remaining dataset')"
        )

    if sum([split_dur == -1 for split_dur in split_durs.values()]) > 1:
        raise ValueError(
            "cannot specify duration of more than one split as -1, unclear how to calculate durations of splits."
        )

    # set any None values for durations to 0; no part of dataset will go to that split
    for split_name in split_durs.keys():
        if split_durs[split_name] is None:
            split_durs[split_name] = 0

    if -1 in split_durs.values():
        total_other_splits_dur = sum([dur for dur in split_durs.values() if dur != -1])

        if total_other_splits_dur > dataset_dur:
            raise ValueError(
                "One dataset split duration was specified as -1, but the total of the other durations specified, "
                f"{total_other_splits_dur} s, is greater than total duration of Dataset, {dataset_dur}."
            )
    else:
        # if none of the target durations are -1
        total_splits_dur = sum(split_durs.values())

        if total_splits_dur > dataset_dur:
            raise ValueError(
                f"Total of the split durations specified, {total_splits_dur} s, "
                f"is greater than total duration of dataset, {dataset_dur}."
            )

    return split_durs["train"], split_durs["val"], split_durs["test"]
351bcd963d68e70d434ce7939cc4d01359285d1f
28,229
import csv
from pathlib import Path
from typing import Any


def load_csv(path: Path) -> Any:
    """Load data from csv file."""
    with open(path, newline='') as csvfile:
        reader = csv.DictReader(csvfile)
        items = list(reader)
    return items
3a6d071f34bf239fb9de5c9eaabcaf6e71021373
28,233
def train_valid_split(x_train, y_train, split_index=45000):
    """Split the original training data into a new training dataset
    and a validation dataset.

    Args:
        x_train: An array of shape [50000, 3072].
        y_train: An array of shape [50000,].
        split_index: An integer.

    Returns:
        x_train_new: An array of shape [split_index, 3072].
        y_train_new: An array of shape [split_index,].
        x_valid: An array of shape [50000-split_index, 3072].
        y_valid: An array of shape [50000-split_index,].
    """
    x_train_new = x_train[:split_index]
    y_train_new = y_train[:split_index]
    x_valid = x_train[split_index:]
    y_valid = y_train[split_index:]
    return x_train_new, y_train_new, x_valid, y_valid
fd8eb959fd67c5a5cdfca0399e8b4fae1ff654d8
28,241
import struct


def structparse_ip_header_info(bytes_string: bytes):
    """Takes a given bytes string of a packet and returns information found
    in the IP header such as the IP Version, IP Header Length, and if IP
    Options are present.

    Examples:
        >>> from scapy.all import *
        >>> icmp_pcap = rdpcap('icmp.pcap')
        >>> firstpacket = icmp_pcap[0]
        >>> thebytes_firstpacket = firstpacket.__bytes__()
        >>> structparse_ip_header_info(thebytes_firstpacket)
        {'ip_version': 4, 'ip_header_len': 20, 'info': 'IHL = 20 bytes, No IP Options Present'}

    References:
        https://docs.python.org/3/library/struct.html

    Args:
        bytes_string (bytes): Reference a bytes string representation of a packet.

    Returns:
        dict: Returns a dictionary.
    """
    # This is an alternate way to get to the data we want, but we want to demo
    # the usage of struct:
    # - ip_layer_plus = bytes_string[14:]
    # - ip_byte0 = ip_layer_plus[0]
    #
    # This uses 'network byte order' (represented by '!'), which is Big Endian
    # (so we could have used '>' instead of '!'); we then ignore the first 14
    # bytes (the Ethernet header) using '14x', and process the next 1 byte as
    # an unsigned integer using 'B'.
    # We use the [0] because the '.unpack()' method always returns a tuple,
    # even when a single element is present, and in this case we just want a
    # single element.
    ip_byte0 = (struct.unpack('!14xB', bytes_string[:15]))[0]
    # A 'bit shift' of 4 bits to the right pushes the most significant nibble
    # to the right and pushes the least significant nibble "over the cliff".
    # In other words, all that remains of our original 8 bits are the 4
    # left-most bits.
    ip_version = ip_byte0 >> 4
    # Using the bitwise AND operator "&" to keep only the low nibble (the IHL
    # field, counted in 32-bit words, hence * 4).
    ip_header_len = (ip_byte0 & 15) * 4

    if ip_header_len < 20:
        some_info = "IHL is < 20 bytes, something is wrong"
    elif ip_header_len == 20:
        some_info = "IHL = 20 bytes, No IP Options Present"
    else:
        some_info = "IHL > 20 bytes, IP Options are Present"

    results = {}
    results['ip_version'] = ip_version
    results['ip_header_len'] = ip_header_len
    results['info'] = some_info
    return results
af8be564ec68d8ecc7f91d8483309a6312f42263
28,247
def get_config(cfg):
    """
    Sets the hyperparameters (architecture) for ISONet using the config file

    Args:
        cfg: A YACS config object.
    """
    config_params = {
        "net_params": {
            "use_dirac": cfg.ISON.DIRAC_INIT,
            "use_dropout": cfg.ISON.DROPOUT,
            "dropout_rate": cfg.ISON.DROPOUT_RATE,
            "nc": cfg.DATASET.NUM_CLASSES,
            "depths": cfg.ISON.DEPTH,
            "has_bn": cfg.ISON.HAS_BN,
            "use_srelu": cfg.ISON.SReLU,
            "transfun": cfg.ISON.TRANS_FUN,
            "has_st": cfg.ISON.HAS_ST,
        }
    }
    return config_params
ea871170b7d70efde0601bbce340b3960227a459
28,254
def num2vhdl_slv(num, width=4):
    """Creates a VHDL slv (std_logic_vector) string from a number.
    The width in bytes can be specified.

    Examples:
        num2vhdl_slv(10, width=1)    => x"0A"
        num2vhdl_slv(0x10, width=2)  => x"0010"
    """
    return ('x"%0' + str(width * 2) + 'X"') % num
a9fb6ce594bdb8756d073ae268cb03dccda7592e
28,256
import jinja2


def ps_filter(val):
    """Jinja2 filter function 'ps' escapes for use in a PowerShell commandline"""
    if isinstance(val, jinja2.Undefined):
        return "[undefined]"
    escaped = []
    for char in str(val):
        if char in "`$#'\"":
            char = "`" + char
        elif char == '\0':
            char = "`0"
        elif char == '\a':
            char = "`a"
        elif char == '\b':
            char = "`b"
        elif char == '\f':
            char = "`f"
        elif char == '\n':
            char = "`n"
        elif char == '\r':
            char = "`r"
        elif char == '\t':
            char = "`t"
        elif char == '\v':
            char = "`v"
        escaped.append(char)
    return ''.join(escaped)
495cd87bfc930089aaa5f4f9b282d20b4883bfb5
28,257
import operator


def multiply_round(n_data: int, cfg: dict):
    """
    Given a configuration {split: percentage}, return a configuration
    {split: n} such that the sum of all n is equal to n_data.
    """
    s_total = sum(cfg.values())
    sizes = {name: int(s * n_data / s_total) for name, s in cfg.items()}
    # Give any rounding remainder to the largest split so the sizes sum to n_data.
    max_name = max(sizes.items(), key=operator.itemgetter(1))[0]
    sizes[max_name] += n_data - sum(sizes.values())
    return sizes
6727de1f6f9bc70aa9d5bb3b53ccf73381e4a86d
28,259
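A small example of multiply_round; the remainder from integer truncation goes to the largest split:

print(multiply_round(10, {"train": 0.8, "test": 0.2}))  # {'train': 8, 'test': 2}
print(multiply_round(10, {"train": 2, "val": 1}))       # {'train': 7, 'val': 3}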
from typing import List


def get_combinations(candidates: List[int], target: int) -> List[List[int]]:
    """Returns a list of lists representing each possible set of drops.

    This function (and its recursive helper function) was adapted from
    https://wlcoding.blogspot.com/2015/03/combination-sum-i-ii.html.

    Args:
        candidates: A list of possible numbers of pieces to drop on a square.
            Effectively, this arg is equivalent to range(1, carry_size + 1).
        target: The number of stones in the carry_size. The number of dropped
            stones must equal the number of stones picked up.

    Returns:
        A list of lists of possible combinations. Note that these lists do not
        contain every permutation of drops, merely every combination of valid
        ints that sums to the target value.
    """
    def get_combinations_rec(candidates, target, index, partial_sum,
                             list_t, combinations) -> None:
        """A recursive helper function for get_combinations."""
        if partial_sum == target:
            combinations.append(list(list_t))
        for i in range(index, len(candidates)):
            if partial_sum + candidates[i] > target:
                break
            list_t.append(candidates[i])
            get_combinations_rec(
                candidates, target, i, partial_sum + candidates[i], list_t, combinations
            )
            list_t.pop()

    combinations: List = []
    get_combinations_rec(candidates, target, 0, 0, [], combinations)
    return combinations
9d68f3b69c23697d924e40d4471296223871165a
28,260
import json


def simple_aimfree_assembly_state() -> dict:
    """
    Fixture for creating the assembly system DT object for tests
    from a JSON file.

    - Complexity: simple
    """
    with open('tests/assets/simple/aimfree_assembly_state.json') as assembly_file:
        aimfree_assembly_state = json.load(assembly_file)
    return aimfree_assembly_state
a1cdd6e85a90d39604214d2d49a52cedbbc4b165
28,262
def implicit_valence(a):
    """Implicit valence of atom"""
    return a.GetImplicitValence()
88b0e7682e8d142d7f0e10caa37fe67bbe4fa2e2
28,270
def joint_card(c1, c2):
    """Given two cardinalities, combine them."""
    return ('{' + ('1' if c1[1] == c2[1] == '1' else '0') + ':'
            + ('1' if c1[3] == c2[3] == '1' else 'M') + '}')
f9df53869d68cd7c48916ede0396787caeebadaf
28,274
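A hedged usage sketch for joint_card, assuming cardinalities are 5-character strings like '{1:1}' or '{0:M}' (positions 1 and 3 hold the lower and upper bounds):

print(joint_card('{1:1}', '{1:1}'))  # {1:1}
print(joint_card('{1:1}', '{0:M}'))  # {0:M}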
def _is_hierachy_searchable(child_id: str) -> bool:
    """
    If the suffix of a child_id is numeric, the whole hierarchy is
    searchable to the leaf nodes. If the suffix of a child_id is
    alphabetic, the whole hierarchy is not searchable.
    """
    pieces_of_child_id_list = child_id.split('.')
    suffix = pieces_of_child_id_list[len(pieces_of_child_id_list) - 1]
    return suffix.isnumeric()
13146128fc8ab050323a23f07133676caeb83aaf
28,275
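Two illustrative calls, assuming dot-separated hierarchy ids:

print(_is_hierachy_searchable("257.13.10"))  # True  (numeric suffix)
print(_is_hierachy_searchable("257.13.x"))   # False (alphabetic suffix)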
import re


def make_job_def_name(image_name: str, job_def_suffix: str = "-jd") -> str:
    """
    Autogenerate a job definition name from an image name.
    """
    # Trim registry and tag from image_name.
    if "amazonaws.com" in image_name:
        image_name = image_name.split("/", 1)[1].split(":")[0].replace("/", "-")

    # https://docs.aws.amazon.com/batch/latest/userguide/create-job-definition.html
    # For Job definition name, enter a unique name for your job definition. Up to 128 letters
    # (uppercase and lowercase), numbers, hyphens, and underscores are allowed.
    job_def_prefix = re.sub("[^A-Za-z_0-9-]", "", image_name)[: 128 - len(job_def_suffix)]
    job_def_name = job_def_prefix + job_def_suffix
    return job_def_name
e81e86e8df750434a1a9310d99256733ea0f8619
28,276
import itertools


def generate_grid_search_trials(flat_params, nb_trials):
    """
    Standard grid search. Takes the product of `flat_params`
    to generate the search space.

    :param flat_params: The hyperparameter options to search.
    :param nb_trials: Returns the first `nb_trials` from the combinations
        space. If this is None, all combinations are returned.
    :return: A list containing the hyperparameter combinations.
    """
    trials = list(itertools.product(*flat_params))
    if nb_trials:
        trials = trials[0:nb_trials]
    return trials
d35072aa26fa62b60add89f36b4343ee4e93567b
28,279
def convert_string_list(string_list):
    """
    Converts a list of strings (e.g. ["3", "5", "6"]) to a list of integers.

    In: list of strings
    Out: list of integers
    """
    int_list = []
    for string in string_list:
        int_list.append(int(string))
    return int_list
b75ba67c142796af13186bc4d7f67e3061a1d829
28,282
import csv


def read_csv(file_name):
    """
    Read csv file

    :param file_name: <file_path/file_name>.csv
    :return: list of lists which contains each row of the csv file
    """
    with open(file_name, 'r') as f:
        data = [list(line) for line in csv.reader(f)][2:]
    return data
01f9aadc0bce949aa630ab050e480da24c53cc40
28,286
def get_names_from_lines(lines, frac_len, type_function):
    """Take a list of lines read from a file, keep the first frac_len
    elements, remove the end-of-line character at the end of each element
    and convert it to the type defined by type_function.
    """
    return [type_function(line[:-1]) for line in lines[:frac_len]]
c9b42ef1388c0cd09b3d7d5e6a7381411438200e
28,288
def execroi(img, roi):
    """
    Args:
        img(np.array): 2 dimensions
        roi(2-tuple(2-tuple))

    Returns:
        np.array: cropped image
    """
    return img[roi[0][0]:roi[0][1], roi[1][0]:roi[1][1]]
4c59dd52186888b2b1c43007a00301a43237dcb3
28,292
def selected_features_to_constraints(feats, even_not_validated=False):
    """
    Convert a set of selected features to constraints.

    Only the features that are validated are translated into constraints,
    otherwise all are translated when `even_not_validated` is set.

    :return: str
    """
    res = ""
    for sel in reversed(feats):
        sel_str = sel['other']
        mode = sel['mode']
        if sel['validated'] or even_not_validated:
            if mode == 'selected':
                res += "[ " + sel_str + " ]" + "\n"
            elif mode == 'rejected':
                res += "[ !" + sel_str + " ]" + "\n"
    return res
e029c26b0e53874b4076c9a4d9065a558736f565
28,293
def iso_string_to_sql_utcdatetime_mysql(x: str) -> str:
    """
    Provides MySQL SQL to convert an ISO-8601-format string (with
    punctuation) to a ``DATETIME`` in UTC. The argument ``x`` is the SQL
    expression to be converted (such as a column name).
    """
    return (
        f"CONVERT_TZ(STR_TO_DATE(LEFT({x}, 26),"
        f" '%Y-%m-%dT%H:%i:%s.%f'),"
        f" RIGHT({x}, 6),"  # from timezone
        f" '+00:00')"  # to timezone
    )
    # In MySQL:
    # 1. STR_TO_DATE(), with the leftmost 26 characters,
    #    giving microsecond precision, but not correct for timezone
    # 2. CONVERT_TZ(), converting from the timezone info in the rightmost 6
    #    characters to UTC (though losing fractional seconds)
1117b228c77f187b5884aeb014f2fb80309ea93a
28,294
def compute_rmsd(frame, ref):
    """Compute RMSD between a reference and a frame"""
    return ref.rmsd(frame)
b189453a409d02279851bd492a95757d1d25bccc
28,297
def iou_score(SR, GT):
    """Computes the IOU score"""
    smooth = 1e-8
    SR = (SR > 0.5).float()
    inter = SR * GT
    union = SR + GT
    return inter.sum() / (union.sum() + smooth)
05627c3b5c62422318a2968bab0a5cfe4430b3b6
28,298
def docstring_parameter(**kwargs):
    """Decorates a function to update the docstring with a variable.

    This allows the use of (global) variables in docstrings.

    Example:
        @docstring_parameter(config_file=CONFIG_FILE)
        def myfunc():
            \"\"\" The config file is {config_file} \"\"\"

    Args:
        **kwargs: Declarations of string formatters.

    Raises:
        KeyError: If formatters are not found in the docstring.
    """
    def decorate(obj):
        obj.__doc__ = obj.__doc__.format(**kwargs)
        return obj
    return decorate
c69505037948f120a7c29ee500f2327001e8b80d
28,299
def arg_to_dict(arg):
    """Convert an argument that can be None, list/tuple or dict to dict

    Example::

        >>> arg_to_dict(None)
        {}
        >>> arg_to_dict(['a', 'b'])
        {'a': {}, 'b': {}}
        >>> arg_to_dict({'a': {'only': 'id'}, 'b': {'only': 'id'}})
        {'a': {'only': 'id'}, 'b': {'only': 'id'}}

    :return: dict with keys and dict arguments as value
    """
    if arg is None:
        arg = []
    try:
        arg = dict(arg)
    except ValueError:
        arg = dict.fromkeys(list(arg), {})
    return arg
adc75f811d02770b34be2552445b192e33401e76
28,302
def enum(**named_values):
    """
    Create an enum with the following values.

    :param named_values:
    :return: enum
    :rtype: Enum
    """
    return type('Enum', (), named_values)
794007a79e43c3ff4af2f70efa3817c224e42bd7
28,310
def fib_1_recursive(n):
    """
    Solution: Brute force recursive solution.

    Complexity:
        Description: The number of computations can be represented as a
            binary tree with height n.
        Time: O(2^n)
    """
    if n < 0:
        raise ValueError('input must be a non-negative whole number')
    if n in [0, 1]:
        return n
    return fib_1_recursive(n - 2) + fib_1_recursive(n - 1)
beb4a726075fed152da34706394ac1bd7ef29f17
28,311
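The first few Fibonacci numbers as produced by fib_1_recursive:

print([fib_1_recursive(n) for n in range(8)])  # [0, 1, 1, 2, 3, 5, 8, 13]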
def crop_center(img, cropx, cropy, cropz):
    """
    Crops out the center of a 3D volume
    """
    x, y, z = img.shape
    startx = x // 2 - (cropx // 2)
    starty = y // 2 - (cropy // 2)
    startz = z // 2 - (cropz // 2)
    return img[startx:startx + cropx, starty:starty + cropy, startz:startz + cropz]
a003fb7fdbcee5e6d4a8547a2f50fa82181bdf37
28,314
def get_text(spec):
    """Reads the contents of the given file"""
    with open(spec) as fh:
        return fh.read()
21766304777d483af403375678389519ca1bcfe1
28,315
def _sum_edge_attr(G, node, attr, method='edges', filter_key=None,
                   split_on='-', include_filter_flags=None,
                   exclude_filter_flags=None):
    """Accumulate attributes for one node_id in network G.

    Parameters
    ----------
    G : networkx.Graph or networkx.MultiGraph
        a graph network to sum edge attributes at a given node.
        NOTE: For directed graphs (DiGraph and MultiDiGraph) the 'edges'
        method is equivalent to the 'out_edges' method.
    node : string or int
        the networkx node at which to query edge attributes
    attr : string
        an edge attribute key that maps to an int or float. Strings will
        not throw an error, but string concatenation is not the purpose
        of this function.
    method : string, optional (default='edges')
        a method name to select edges for the summation. Valid options
        include 'edges' (default), 'in_edges' and 'out_edges'.
        NOTE: For directed graphs (DiGraph and MultiDiGraph) the 'edges'
        method is equivalent to the 'out_edges' method.
    filter_key : string, optional (default=None)
        edge attribute key that will be searched by the filter flags kwargs
    split_on : string, optional (default='-')
        filter_key string will be split by this character to form a list
        of flags.
    include_filter_flags : list, optional (default=None)
    exclude_filter_flags : list, optional (default=None)

    Returns
    -------
    float
        the sum of the values associated with the `attr`
    """
    edges = getattr(G, method)(node, data=True)
    if not edges:
        return 0

    includes = edges
    if include_filter_flags is not None:
        includes = [
            edge for edge in edges
            if any([
                i in str(edge[2][filter_key]).split(split_on)
                for i in include_filter_flags])]

    excludes = []
    if exclude_filter_flags is not None:
        excludes = [
            edge for edge in edges
            if any([
                i in str(edge[2][filter_key]).split(split_on)
                for i in exclude_filter_flags])]

    edges = [i for i in includes if i not in excludes]

    return sum([data.get(attr, 0) for _from, _to, data in edges])
270f0afb943b6c6828a6cc8a22452ee5eabbcfb8
28,320
def get_column_indexes(column_name_items):
    """
    This function returns the indexes for the columns of interest from the CSV file.

    :param column_name_items: List of column names
    :type column_name_items: list
    :return: Column index for 'TRUTH.TOTAL', 'QUERY.TP', 'QUERY.FP',
        'TRUTH.FN', 'METRIC.Precision' and 'METRIC.Recall'
    :rtype: list
    """
    total_index = column_name_items.index('TRUTH.TOTAL')
    tp_index = column_name_items.index('QUERY.TP')
    fp_index = column_name_items.index('QUERY.FP')
    fn_index = column_name_items.index('TRUTH.FN')
    precision_index = column_name_items.index('METRIC.Precision')
    recall_index = column_name_items.index('METRIC.Recall')
    return [total_index, tp_index, fp_index, fn_index, precision_index, recall_index]
51c38d048db6530bc502dfb64e95384ee096428a
28,324
def is_sorted(t):
    """Predicate, true if t is sorted in ascending order.

    t: list
    """
    # sorted(t) will return a sorted version of t, without changing t.
    # == will compare the two lists to see if their values are the same.
    # The `is` operator would fail here, even if the lists look identical;
    # i.e. `return (t is sorted(t))` would always return False.
    return t == sorted(t)
3c346a349cd0d870c5cef549a574674ea566ae6c
28,326
import typing


def get_param_query(sql: str, params: dict) -> typing.Tuple[str, tuple]:
    """
    Re-does a SQL query so that it uses asyncpg's special query format.

    :param sql: The SQL statement to use.
    :param params: The dict of parameters to use.
    :return: A two-item tuple of (new_query, arguments)
    """
    if not params or len(params) < 1:
        return sql, ()

    # Dump the params into key -> value pairs.
    kv = [(k, v) for (k, v) in params.items()]

    # Define the list of items that are used to return.
    items = []
    fmt_dict = {}

    # Iterate over the key-values, adding each key to the fmt_dict with the number index.
    for n, (k, v) in enumerate(kv):
        n += 1
        # Add to the format dict.
        fmt_dict[k] = "${}".format(n)
        items.append(v)

    # Finally, format the SQL with the required params.
    sql_statement = sql.format(**fmt_dict)

    return sql_statement, tuple(items)
44d2316f346ec53354d7ebeb69387c093ab3089b
28,327
def csv_list(value):
    """
    Convert a comma separated string into a list

    Parameters
    ----------
    value : str
        The string object to convert to a list

    Returns
    -------
    list
        A list based on splitting the string on the ',' character
    """
    if value:
        result = []
        for item in value.split(','):
            item = item.strip()
            if item:
                result.append(item)
        return result
    return []
d65e004eb6696e7418e4f5f65a6271562c462cab
28,328
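csv_list in action, including the whitespace and empty-item handling:

print(csv_list("a, b, ,c"))  # ['a', 'b', 'c']
print(csv_list(""))          # []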
def find_section_id(sections, id):
    """
    Find the section with a given id
    """
    for idx, section in enumerate(sections):
        try:
            if section['id'] == id:
                return idx
        except KeyError:
            continue
    return None
5ee29faea5a0966873966fc85ecfe1f89b08ecbb
28,330
import typing


def voiced_variants(base_phone) -> typing.Set[str]:
    """
    Generate variants of voiced IPA phones

    Parameters
    ----------
    base_phone: str
        Voiced IPA phone

    Returns
    -------
    set[str]
        Set of base_phone plus variants
    """
    return {base_phone + d for d in ["", "ʱ", "ʲ", "ʷ", "ⁿ", "ˠ", "̚"]} | {
        d + base_phone for d in ["ⁿ"]
    }
99111f1fcbfabb27a22efb75121d9f71cf76b64b
28,333
def clean_string(s: str, extra_chars: str = ""):
    """Method to replace various chars with an underscore and remove
    leading and trailing whitespace.

    Parameters
    ----------
    s : str
        string to clean
    extra_chars : str, optional
        additional characters to be replaced by an underscore

    Returns
    -------
    s: str
        clean string
    """
    chars = "-.()%" + extra_chars
    for char in chars:
        s = s.replace(char, "_")
    s = s.strip()
    return s
7f93f1fea075bb09ba3b150a6ff768d0a266093c
28,335
from collections import Counter


def count_characters_two(string):
    """Counts using collections.Counter"""
    counter = Counter(string)
    return counter
d2c3b5eef156507f2b7b8b9a3b3b5a1a54a0a766
28,336
import torch


def unique_2d(*X):
    """Get the unique combinations of inputs X.

    Parameters
    ----------
    X : array-like of type=int and shape=(n_samples, n_features)
        Input events for which to get unique combinations

    Returns
    -------
    *X_unique : np.array of shape=(n_samples_unique, n_features)
        Unique input event combinations

    inverse : np.array of shape=(n_samples,)
        Inverse used to reconstruct original values
    """
    # Get input shapes
    shapes = [x.shape[1] for x in X]

    # Get unique combined inputs
    unique, inverse = torch.unique(
        torch.cat(X, dim=1),
        dim=0,
        return_inverse=True
    )

    # Retrieve original inputs
    result = list()
    previous = 0
    for shape in shapes:
        result.append(unique[:, previous:previous + shape])
        previous += shape

    # Add inverse
    result.append(inverse)

    # Return result
    return tuple(result)
8a4580c9dbbc8118f1f43d723874ccd26c4eb1ec
28,337
import torch


def run_single(model: torch.nn.Module, *args) -> torch.Tensor:
    """Runs a single element (no batch dimension) through a PyTorch model"""
    return model(*[a.unsqueeze(0) for a in args]).squeeze(0)
f0c74c90a403086cf1f0057a3ee4f8d785668e26
28,340
import hashlib


def hash_file(file_path: str) -> str:
    """Return sha1 hash of the file"""
    with open(file_path, "r") as content_file:
        hash_object = hashlib.sha1(content_file.read().encode("utf-8"))
        return hash_object.hexdigest()
fb71d4610a3b081b5b69e49c721fa3a0da61e859
28,345
import torch


def tensor(x):
    """Construct a PyTorch tensor of data type `torch.float64`.

    Args:
        x (object): Object to construct array from.

    Returns:
        tensor: PyTorch array of data type `torch.float64`.
    """
    return torch.tensor(x, dtype=torch.float64)
2734d09e8c3a563dda48f7954029f3f857b3aff3
28,359
def _first_or_none(array):
    """
    Pick first item from `array`, or return `None`, if there is none.
    """
    if not array:
        return None
    return array[0]
e8430cf316e12b530471f50f26d4f34376d31ce2
28,363
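And a final pair of calls showing both branches of _first_or_none:

print(_first_or_none([3, 5]))  # 3
print(_first_or_none([]))      # None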