Columns: content (string, 39 to 14.9k chars), sha1 (string, 40 chars), id (int64, 0 to 710k)
import uuid


def get_uuid(s):
    """Return UUID for the string passed in."""
    return str(uuid.uuid5(uuid.NAMESPACE_OID, str(s)))
f034e235ff3e673152216fbb84f9fc0ca85cfc41
16,405
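A quick usage sketch of get_uuid above (the input strings are illustrative, not from the source): uuid5 hashing is deterministic, so equal inputs always map to equal UUIDs.

>>> get_uuid('example') == get_uuid('example')
True
>>> get_uuid('example') == get_uuid('other')
False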
import click


def detect_config_version(config):
    """Return version of an slo-generator config based on the format.

    Args:
        config (dict): slo-generator configuration.

    Returns:
        str: SLO config version.
    """
    if not isinstance(config, dict):
        click.secho(
            'Config does not correspond to any known SLO config versions.',
            fg='red')
        return None
    api_version = config.get('apiVersion', '')
    kind = config.get('kind', '')
    if not kind:  # old v1 format
        return 'v1'
    return api_version.split('/')[-1]
26cb4d7ae7eba981e456dc8f9201df719f720896
16,407
def o_to_matsubara_idx_b(o):
    """
    Convert index in "o" convention to bosonic Matsubara index

    Parameters
    ----------
    o : int
        2*n

    Returns
    -------
    n : int
    """
    assert o % 2 == 0
    return int(o / 2)
347313ac016033360910d94e19c7d3ef8bc3f7e3
16,408
def _get_warmup_factor_at_iter(method: str, curr_iter: int, warmup_iters: int,
                               warmup_factor: float) -> float:
    """Return the learning rate warmup factor at a specific iteration.

    Parameters
    ----------
    method: str
        Warmup method; either "constant" or "linear".
    curr_iter: int
        Iteration at which to calculate the warmup factor.
    warmup_iters: int
        The length of the warmup phases.
    warmup_factor: float
        The base warmup factor (the meaning changes according to the method used).

    Returns
    -------
    float: The effective warmup factor at the given iteration.
    """
    if curr_iter >= warmup_iters:
        return 1.0
    if method == "constant":
        return warmup_factor
    if method == "linear":
        alpha = curr_iter / warmup_iters
        return warmup_factor * (1 - alpha) + alpha
    raise ValueError(f"Unknown warmup method: {method}")
5d53b32746450189eeca116f0599dbe00c05d82b
16,411
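A worked sketch of the linear schedule in _get_warmup_factor_at_iter above (parameter values are illustrative): with warmup_factor=0.001 and warmup_iters=1000 the factor interpolates linearly from 0.001 at iteration 0 to 1.0 at the end of warmup.

>>> _get_warmup_factor_at_iter('linear', 0, 1000, 0.001)
0.001
>>> _get_warmup_factor_at_iter('linear', 500, 1000, 0.001)  # 0.001 * 0.5 + 0.5
0.5005
>>> _get_warmup_factor_at_iter('linear', 1000, 1000, 0.001)
1.0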
def pierce(t: float) -> float:
    """Calculates Pierce.

    Args:
        t (float): Air temperature [K].

    Returns:
        float: Pierce.
    """
    return 0.0658 * t**3 - 53.7558 * t**2 + 14703.8127 * t - 1345485.0465
94dd4ccf4de79ba9f91aa37578b68c1b6d462c05
16,412
def instrument_trial_pairs(df):
    """Extract a list of all unique instrument/trial pairs."""
    df_iter = df.groupby(['instrument', 'trial']).size().reset_index()
    return [(r['instrument'], r['trial']) for _, r in df_iter.iterrows()]
600e2a96e2bd64f3dc1128fbdbb881b3b1790719
16,413
def compare(pipeline1, pipeline2):
    """
    Compare if two dataset pipelines are the same.

    Args:
        pipeline1 (Dataset): a dataset pipeline.
        pipeline2 (Dataset): a dataset pipeline.

    Returns:
        Whether pipeline1 is equal to pipeline2.

    Examples:
        >>> pipeline1 = ds.MnistDataset(mnist_dataset_dir, 100)
        >>> pipeline2 = ds.Cifar10Dataset(cifar_dataset_dir, 100)
        >>> ds.compare(pipeline1, pipeline2)
    """
    return pipeline1.to_json() == pipeline2.to_json()
dacd93eabc63f6b51502f437fa625a14314e0740
16,416
def vec_subvec(a, r):
    """
    Extracts a sub-vector from a given vector

    Parameters
    ----------
    a: list[]
        A vector of scalar values
    r: tuple
        A pair (ps, pe) indicating the start and end points of the
        sub-vector dimension. If ps > pe then the sub-vector will be
        reversed. Values must be positive.

    Returns
    -------
    list[]
        The sub-vector
    """
    assert len(r) == 2
    assert min(r) >= 0
    step = 1 if r[0] <= r[1] else -1
    # A reversed slice that must end at index 0 cannot be written as
    # a[ps:-1:-1], so the stop bound is omitted in that edge case.
    return (a[r[0]::step] if r[0] > r[1] == 0
            else a[r[0]:r[1] + step:step])
d704f50c6269bf5a593ccdba5cdce67542a462d7
16,420
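A usage sketch of vec_subvec above (values are illustrative): forward ranges slice normally, while ps > pe yields the reversed sub-vector, including the pe == 0 edge case.

>>> vec_subvec([10, 11, 12, 13, 14], (1, 3))
[11, 12, 13]
>>> vec_subvec([10, 11, 12, 13, 14], (3, 1))
[13, 12, 11]
>>> vec_subvec([10, 11, 12, 13, 14], (3, 0))
[13, 12, 11, 10]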
import re


def detectMetadataFormat(xmldoc):
    """Detect the format of the metadata in `xmldoc`."""
    root = xmldoc
    if re.search("eml$", root.tag):
        return "eml"
    elif re.search("Dryad", root.tag):
        return "dryad"
    elif re.search("metadata", root.tag):
        return "fgdc"
    else:
        return "unknown"
6534f0f9c4bb3b2905be6a924ae8ceb0ce39ab1b
16,422
def astz(dt, tz):
    """
    Given a datetime object and a timezone object, return a new
    datetime object that represents the same moment, but written
    in terms of the new timezone.

    :param dt: a datetime object
    :param tz: a timezone object
    :return: a datetime object
    """
    # See http://pythonhosted.org/pytz/ for why this is
    # not as trivial as it might first appear.
    return tz.normalize(dt.astimezone(tz))
af98a3e9e6fccc21f09302b5f7e655149904e196
16,423
def image_generator_from_dataframe(dataframe,
                                   img_size,
                                   batch_size,
                                   cls_labels,
                                   datagen,
                                   color_mode='rgb'):
    """
    Creates a generator that loads images from information in a pandas
    dataframe.

    The dataframe must have at least two columns:
    - "filenames" with the absolute path to the file
    - "cls" with the class label of each image (text)

    Images will be preprocessed using an ImageDataGenerator, resized to a
    fixed shape and converted to grayscale if desired.

    :param dataframe: Pandas dataframe with the image information
    :param img_size: Shape to resize the images to, e.g. (128, 128)
    :param batch_size: Size of the generator batch
    :param cls_labels: List containing each class label
    :param datagen: The ImageDataGenerator for preprocessing
    :param color_mode: 'rgb' or 'grayscale' to produce 3 or 1 channel images, respectively
    :return:
    """
    return datagen.flow_from_dataframe(
        dataframe,
        x_col="filenames",
        y_col="cls",
        classes=cls_labels,
        target_size=img_size,
        batch_size=batch_size,
        color_mode=color_mode,
        interpolation='bilinear',
        class_mode='categorical')
d208b1e6ba36df1cee9d35c718431c110ab08304
16,425
def get_object_class(configvalue):
    """Formats the objectclass line from the config into a list"""
    objclass = configvalue.split('|')
    return objclass
3cb4c9370d5e711fcadbbd4d0583b2967e9ebe6d
16,426
def find_default_dataset_and_split_names(datasets,
                                         default_dataset_name=None,
                                         default_split_name=None,
                                         train_split_name=None):
    """
    Return a good choice of dataset name and split name, possibly not the train split.

    Args:
        datasets: the datasets
        default_dataset_name: a possible dataset name. If `None`, find a suitable
            dataset, if not, the dataset must be present
        default_split_name: a possible split name. If `None`, find a suitable split,
            if not, the dataset must be present. if `train_split_name` is specified,
            the selected split name will be different from `train_split_name`
        train_split_name: if not `None`, exclude the train split

    Returns:
        a tuple (dataset_name, split_name)
    """
    if default_dataset_name is None:
        default_dataset_name = next(iter(datasets))
    else:
        if default_dataset_name not in datasets:
            return None, None

    if default_split_name is None:
        available_splits = datasets[default_dataset_name].keys()
        for split_name in available_splits:
            if split_name != train_split_name:
                default_split_name = split_name
                break
    else:
        if default_split_name not in datasets[default_dataset_name]:
            return None, None

    return default_dataset_name, default_split_name
6a1c844109afb2fcd3fd9f85bc966377fa7b7bc2
16,432
def pearsoncc(x, y):
    """Compute Pearson Correlation Coefficient."""
    x = (x - x.mean(0)) / x.std(0)
    y = (y - y.mean(0)) / y.std(0)
    return (x * y).mean()
145471d2007feaef0c285312b645d07e6922d4c2
16,435
def get_projects_by_4(p):
    """The frontend displays a list of projects in 4 columns. This function
    splits the list of the projects visible by the user in chunks of size 4
    and returns it."""
    # Split the list of visible projects into chunks of size 4
    projects = sorted([e['id'] for e in p['projects']])
    n = 4
    projects_by_4 = [projects[i * n:(i + 1) * n]
                     for i in range((len(projects) + n - 1) // n)]
    return projects_by_4
c44e45c96a0d0869f0b8c2e6b61779feda59e4ee
16,438
def sdetectBatch(sess, image_tensor, tensor_dict, images):
    """
    Detects objects on an already-configured session and tensor dict with a
    set of images
    """
    output = sess.run(tensor_dict, feed_dict={image_tensor: images})
    return output
a0d289df27dbfe8c20553d0362e9c6f0e5d5c1c4
16,440
def add_NA_indicator_variables(df, inplace=False):
    """
    Add indicator variables for each column to indicate missingness.
    """
    df_ = df if inplace else df.copy()
    for i, c in enumerate(df_.columns):
        x = df_[c].isna()
        if x.any():
            df_.insert(i + 1, '{}_NA'.format(c), x)
    return df_
834b72f4df820d520cc2e2c1fb3605ad846a9f2f
16,443
import hashlib


def gen_server_hash(server_id, shared_secret, public_key):
    """Generates the server hash for use in authentication.

    Parameters
    ----------
    server_id : :class:`str`
        The server id found in :class:`~.EncryptionRequestPacket`.
    shared_secret
        The shared secret gotten from :func:`gen_shared_secret`.
    public_key
        The public key found in :class:`~.EncryptionRequestPacket`.

    Returns
    -------
    :class:`str`
        The server hash.
    """
    h = hashlib.sha1()
    h.update(server_id.encode("ascii"))
    h.update(shared_secret)
    h.update(public_key)
    return f"{int.from_bytes(h.digest(), byteorder='big', signed=True):x}"
f6294f68fa94a92fca1e1942d280a07535ce7abb
16,448
import base64


def PngFile_to_Base64(file_name):
    """Converts a png file to a base 64 encoded string"""
    # Use a context manager so the file handle is closed after reading.
    with open(file_name, "rb") as in_file:
        encoded = base64.b64encode(in_file.read()).decode()
    return encoded
018aed2f85584ce4c585236afb58996b6952e852
16,450
def dicecoeff_precount(e1, e2, count):
    """
    Dice coefficient measures the similarity of two bit patterns

    :param e1: bitarray1
    :param e2: bitarray2
    :param count: float bitcount1 + bitcount2
    :return: real 0-1 similarity measure
    """
    if count == 0:
        return 0
    return 2 * (e1 & e2).count() / count
d35658e5d369b7c36ee422d5b30980236c7112fb
16,452
def find_tts(prices):
    """Returns a list containing the buy day, sell day and resulting profit
    - finds the best days on which to buy and sell."""
    buyday = 0
    sellday = 0
    profit = 0
    for x in range(len(prices)):
        for y in range(x + 1, len(prices)):
            if prices[x] < prices[y]:
                if prices[y] - prices[x] > profit:
                    profit = prices[y] - prices[x]
                    buyday = x
                    sellday = y
    return [buyday, sellday, profit]
efa5bcf672d58b0f1a0562afb33fe9881668dd2b
16,456
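A usage sketch of find_tts above (the price list is illustrative): buying at index 2 (price 5) and selling at index 4 (price 11) maximizes profit.

>>> find_tts([10, 7, 5, 8, 11, 9])
[2, 4, 6]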
def get_clusters_as_list(df):
    """Return a list of lists of Event

    Args:
        df (DataFrame): see get_dataframe()

    Returns:
        list of lists of Event: list of event clusters
    """
    return df.groupby('label')['event'].apply(list).values.tolist()
b49a8efeaef0506659a483cb9cb8431d284557a5
16,464
def saml_assertion_to_ldap_style_name(assertion_attributes):
    """
    Return string, approximating a NOAA LDAP-style name for SAML user

    Keyword Parameters:
    assertion_attributes -- Dict, representing SAML assertion attributes for
        a logged in user

    >>> test_attributes = {'mail': ['[email protected]']}
    >>> saml_assertion_to_ldap_style_name(test_attributes)
    'uid=pat.ng,ou=People,o=noaa.gov'
    """
    # Adapt SAML user email assertion, into a LDAP-style name
    user_name, user_domain = assertion_attributes['mail'].pop().split('@')
    generated_ldap_id = 'uid={},ou=People,o={}'.format(user_name.lower(),
                                                       user_domain)
    return generated_ldap_id
c21c1f461e5ad06721417fb51284e82b7b5128d7
16,466
import pathlib


def _get_institution(path: pathlib.Path) -> str:
    """Returns the institution. As per docstring, this is index -3."""
    return str(path).split('/')[-3]
4c42e40bc19dbe7c7e45f5984a6ff5cf940d4fd7
16,467
def calc_per_difference(dev_per_dict: dict) -> dict:
    """
    Calculates the difference between the 'speak' testset PER and the
    training-dev sets. This difference is a measure of data mismatch.
    """
    per_diff_dict = dict()
    for name, per in dev_per_dict.items():
        if name != 'speak':
            diff_name = name + "-speak"
            per_diff_dict[diff_name] = dev_per_dict.get('speak', 0.0) \
                - dev_per_dict.get(name, 0.0)
    return per_diff_dict
25f1b331e9b8ba0346422000a6cf3e68061dd8a2
16,468
def hash_diff(old_hash, new_hash):
    """
    Returns the keys in new_hash that have different values in old_hash.
    Also returns keys that are in new_hash but not in old_hash.

    Keyword arguments:
    old_hash -- the dictionary of hashes for the old directory
    new_hash -- the dictionary of hashes for the new directory
    """
    paths_changed = []
    new_paths = []
    for key, value in new_hash.items():
        if key in old_hash:
            if value != old_hash[key]:
                paths_changed.append(key)
        else:
            new_paths.append(key)
    return (paths_changed, new_paths)
7c9c650c64371385843f8f7604eaa07209e9149f
16,474
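A usage sketch of hash_diff above (paths and hash values are illustrative): 'b.txt' changed and 'c.txt' is new.

>>> old = {'a.txt': 'h1', 'b.txt': 'h2'}
>>> new = {'a.txt': 'h1', 'b.txt': 'h2-modified', 'c.txt': 'h4'}
>>> hash_diff(old, new)
(['b.txt'], ['c.txt'])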
def str2ints(stat_id_str: str) -> list:
    """Convert stat_id string e.g. 'account_id:tank_id' to list of ints"""
    return [int(x) for x in stat_id_str.split(':')]
15def7276ac9cfea86a5a8010b95de189f7750d5
16,479
def get_category(line1):
    """Collects breach category from the line.

    Args:
        line1 (str): 1st line of data block

    Returns:
        str: breach category
    """
    line1 = line1.split(' ')
    return line1[-2]
49da3f6efef3ed72dd8ba43795d297d04b8c20c8
16,483
import random


def generate_random_offset(background_shape: tuple, object_shape: tuple) -> tuple:
    """
    Generate a safe random offset for the background.

    :param background_shape: tuple
    :param object_shape: tuple
    :return: tuple - offset in x, y
    """
    b_height, b_width = background_shape
    o_height, o_width = object_shape
    random_x = random.randrange(0, b_width - o_width, 1)
    random_y = random.randrange(0, b_height - o_height, 1)
    return random_x, random_y
c3d015f3be7add5ee1a472e8c73f0a32abca898e
16,485
def get_ad_sublist(adlist, names):
    """
    Select a sublist of AstroData instances from the input list using a
    list of filename strings. Any filenames that don't exist in the
    AstroData list are just ignored.
    """
    outlist = []
    for ad in adlist:
        if ad.filename in names:
            outlist.append(ad)
    return outlist
61af4ebb3c4c7cd93af1576a6906c612aae5872d
16,486
def unaligned_words(f_words, e_words, biphrases):
    """Find unaligned words

    :param f_words: source words
    :param e_words: target words
    :param biphrases: list of phrase pairs (check `minimal_biphrases`)
    :returns: set of unaligned source words, set of unaligned target words
    """
    fs = set()
    es = set()
    for fp, ep in biphrases:
        fs.update(fp)
        es.update(ep)
    return frozenset(range(len(f_words))) - fs, frozenset(range(len(e_words))) - es
dbee429e9f72b17d3e3ba311fdd4c17d2938bca1
16,490
def toTf(f):
    """
    :param f: input pose
    :type f: :class:`PyKDL.Frame`

    Return a tuple (position, quaternion) for the pose.
    """
    return ((f.p[0], f.p[1], f.p[2]), f.M.GetQuaternion())
3fe386803804b1c27919c47f1a1cf4a59b20b1ed
16,491
def predict_model(dataset, model):
    """
    Method to predict reliability of dataset using the provided model

    :param dataset: dataset whose reliability is to be predicted
    :param model: model to be used to predict reliability
    :return: the reliabilities of the dataset
    """
    for drop_column in ["is_reliable", "vic_x", "vix_y", "latitude", "longitude"]:
        if drop_column in dataset.columns:
            dataset = dataset.drop(columns=[drop_column])
    return model.predict(dataset)
081f2fa73663c46b9f0018ba037acd0ce1d2d086
16,493
def nobrackets(v):
    """Remove brackets"""
    return v.replace('[', '').replace(']', '')
807dafa83a743a94ca81666e979857ba8481eab9
16,494
def _set_args(args, line_args, file_name):
    """
    Sets the arg list to contain all of the original CL arguments and the
    arguments provided in the file.

    :list args: copy of sys.argv command line args
    :list line_args: ordered list of key, value args from file
    :str file_name: name of file to remove from args list
    """
    args = args + line_args
    args.remove(file_name)
    return args
1c38acd5e34f7ee2e27f2396ec6c917ecfde996a
16,500
import csv


def read_csv(input_file_path, verbose=False, delimiter=','):
    """
    Function reads csv file and returns list of records

    :param input_file_path: path to csv file
    :param verbose: defines if verbose mode
    :param delimiter: fields separator in csv file
    :return: list of records
    """
    result = []
    with open(input_file_path) as csv_file:
        for row in csv.reader(csv_file, delimiter=delimiter):
            result.append(row)
    if verbose:
        print(result)
    return result
57597e820750b11382cd27e31135fa8b6f45153e
16,501
def get_metric_name_from_task(task: str) -> str:
    """Get the name of the metric for the corresponding GLUE task.

    If using `load_best_model_at_end=True` in TrainingArguments then you need
    `metric_for_best_model=metric_name`. Use this method to get the
    metric_name for the corresponding GLUE task.
    """
    if task == "stsb":
        return "pearson"
    elif task == "cola":
        return "matthews_correlation"
    else:
        return "accuracy"
2b911db666e74345a288d1781c2368dfd7a22a74
16,503
def slice_image(im, dict_obj):
    """Slice the bounding box out of the image and return."""
    left = dict_obj['left']
    top = dict_obj['top']
    right = dict_obj['right']
    bottom = dict_obj['bottom']
    im = im[top:bottom, left:right, :]
    return im
1c4a14386a6d70a922af6bfc2c7415553a48a52d
16,505
from typing import Optional


def check_n_jobs(n_jobs: Optional[int] = None):
    """Parse the ``n_jobs`` parameter for multiprocessing."""
    if n_jobs == -1:
        return None
    elif n_jobs is None:
        return 1
    else:
        return n_jobs
0d9c67c2e995df9fb3e497db40466eab9b5041d2
16,507
from datetime import datetime


def convert_to_date(date):
    """Convert to date with '%B %d, %Y' format."""
    return datetime.strptime(date, '%B %d, %Y')
10db2be45c5cae52365858ed324540486f6e5eff
16,509
def get_dtim(tree, hart):
    """Get the DTIM associated with the hart"""
    dtim_ref = hart.get_field("sifive,dtim")
    if dtim_ref:
        dtim = tree.get_by_reference(dtim_ref)
        return dtim
    return None
991351235d4179d1e9ac75c3c9746020a81a9cc9
16,511
def _check_startyear(cfgs):
    """
    Check to see that at most one startyear is defined in the config

    Returns
    -------
    int
        startyear

    Raises
    ------
    ValueError
        if more than one startyear is defined
    """
    first_startyear = cfgs[0].pop("startyear", 1750)
    if len(cfgs) > 1:
        for cfg in cfgs[1:]:
            this_startyear = cfg.pop("startyear", 1750)
            if this_startyear != first_startyear:
                raise ValueError("Can only handle one startyear per scenario ensemble")
    return first_startyear
a43932082bcd128a9badf9b26648e96a9a4ef9bd
16,512
def check_if_point_in_extents(point, extents):
    """Check if a 2D point lies within the bounding box
    extents = (xmin, ymin, xmax, ymax)

    returns: boolean"""
    if (point[0] >= extents[0]) & (point[0] <= extents[2]) & \
       (point[1] >= extents[1]) & (point[1] <= extents[3]):
        return True
    else:
        return False
f25fe2d8b49a44f1866695c749ce1933d6e998fc
16,514
def has_errors(result):
    """This function checks if a GqlResponse has any errors.

    Args:
        result (GqlResponse): [data, errors]

    Returns:
        (boolean): Returns `True` if a transaction has at least one error.
    """
    _, errors = result
    return len(errors) > 0
15fddcf9b2231c946fabb6603edc2635c8b9478f
16,518
def get_dimensions_by_order(dims_in, dataset):
    """Get dimension(s) by numerical order.

    Parameters
    ----------
    dims_in: int or list of int
        the dimensions by numerical order
    dataset: sidpy.Dataset

    Returns
    -------
    dims_out: list of dimensions
    """
    if isinstance(dims_in, int):
        dims_in = [dims_in]
    dims_out = []
    for item in dims_in:
        if isinstance(item, int):
            if item in dataset._axes:
                dims_out.append([item, dataset._axes[item]])
    return dims_out
3430f045ed57e3d98aec15ffb7298d1c727bee27
16,522
import pickle


def load_data(pathToPickleFile):
    """
    Read in pickled file or dir.

    File: ground_truth_dict = load_data('ground_truth.pkl')
    Dir: ground_truth_dict = load_data(os.path.join(output_dir, 'ground_truth.pkl'))

    :param pathToPickleFile: pickled file to read in, e.g. 'dataset.pkl'
    :return: the data from the pickled file
    """
    with open(pathToPickleFile, 'rb') as pickle_file:
        data = pickle.load(pickle_file)
    return data
26655cadd9ba4130b9280eaaa97cdc0b05563521
16,523
import contextlib
import socket


def port_available(port):
    """
    Find if a port is in use

    From http://stackoverflow.com/a/35370008

    Args:
        port: The port to be checked.

    Returns:
        True if the port is available; False if it is in use.
    """
    with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
        if sock.connect_ex(("localhost", port)) == 0:
            return False
        else:
            return True
5a29eb8a252591a5a05deb759e3811bd52a9940d
16,531
def DEFAULT_REPORT_SCRUBBER(raw):
    """Remove breakdown and properties."""
    return {k: v for k, v in raw.items() if k not in ("breakdown", "properties")}
845f1040728b6826ca6f3fe858730085aeb7787d
16,536
def padded_insert(items, index, value, null_val=None):
    """
    insert value into the items list at given index
    if index is larger than length of list then extend it up to index
    and pad the extra space with null_val
    """
    if len(items) == index:
        items.append(value)
    elif len(items) > index:
        items[index] = value
    else:
        items.extend([null_val] * (index - len(items)))
        items.append(value)
    return items
725dc2d0e314e1bc76bdb7f6d778e9b13316b2aa
16,540
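A usage sketch of padded_insert above (values are illustrative); note the list is mutated in place and also returned.

>>> padded_insert(['a', 'b'], 1, 'X')
['a', 'X']
>>> padded_insert(['a', 'b'], 4, 'X')
['a', 'b', None, None, 'X']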
import re


def _alphanum_key(string):
    """Parse a string into string and integer components."""
    parity = int(string[0] in '0123456789')
    return [
        int(x) if i % 2 != parity else x
        for i, x in enumerate(re.split('([0-9]+)', string)[parity:])
    ]
12d3f6761a60442327a52c3c60751fe14fa92545
16,543
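A usage sketch of _alphanum_key above (file names are illustrative): used as a sort key it yields natural ordering, because numeric runs are compared as integers rather than character by character.

>>> sorted(['img12', 'img2', 'img1'], key=_alphanum_key)
['img1', 'img2', 'img12']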
from typing import List


def solution(nums: List[int]) -> bool:
    """
    Look at the list backwards, e.g. input=[3,2,1,0,4] -> [4,0,1,2,3].
    Walking the reversed list, whenever an element's value is at least the
    gap between its index and the index of the current target, that element
    becomes the new target for the following elements to reach. Repeating
    this logic, as long as the last element of the reversed list (the first
    element of the input) can reach the last target, it can reach the end.
    """
    nums.reverse()
    result = False
    target_idx = 0
    for idx, num in enumerate(nums):
        if num < (idx - target_idx):
            result = False
        else:
            result = True
            target_idx = idx
    return result
2d5ce63379c818f1559568dd59082d25b4f03a5d
16,545
def clean_data(df):
    """
    This procedure removes rows containing NAs from the dataframe

    :param df: dataframe loaded from the flat file
    :return: dataframe after removing NAs
    """
    return df.dropna()
2839b19c8aff3ce85eb9f9b0716be6a0b85e74c7
16,550
def colorwheel(color_value):
    """
    A colorwheel. ``0`` and ``255`` are red, ``85`` is green, and ``170`` is
    blue, with the values between being the rest of the rainbow.

    :param int color_value: 0-255 of color value to return
    :return: tuple of RGB values
    """
    if color_value < 0 or color_value > 255:
        return 0, 0, 0
    if color_value < 85:
        return 255 - color_value * 3, color_value * 3, 0
    if color_value < 170:
        color_value -= 85
        return 0, 255 - color_value * 3, color_value * 3
    color_value -= 170
    return color_value * 3, 0, 255 - color_value * 3
22a65f7846e32e58365e573c73d37fa1272a1fda
16,551
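A usage sketch of colorwheel above: the three anchor positions map to pure red, green and blue.

>>> colorwheel(0)
(255, 0, 0)
>>> colorwheel(85)
(0, 255, 0)
>>> colorwheel(170)
(0, 0, 255)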
import random


def _make_random_sequence(length=50):
    """Generate a random string for secret key or password"""
    chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
    return ''.join(random.SystemRandom().choice(chars) for n in range(length))
d69c213b3c2ebfe8fda82400c69a767e91ed9f35
16,552
import requests


def get_pypi_info(package="cellpy"):
    """get version number and sha256 for a pypi package

    Args:
        package (str): name of package

    Returns:
        [version, sha256]
    """
    url = f"https://pypi.org/pypi/{package}/json"
    response = requests.get(url)
    if not response:
        print(f"url {url} not responding")
        return None, None
    response = response.json()
    version = response["info"]["version"]
    release = response["releases"][version][-1]
    sha256 = release["digests"]["sha256"]
    return version, sha256
819fa2c0ab0264455ae91225dff0d1f8fb498820
16,553
def get_res(catalog):
    """Returns the resolution"""
    return catalog['ishape_hsm_regauss_resolution']
43bd30401f77de10a41be6ab720626acff53d643
16,555
import math


def angle_between_vectors(vector1, vector2):
    """
    Returns the angle in radians between vectors 'vector1' and 'vector2'::

        angle_between((1, 0, 0), (0, 1, 0)): 1.5707963267948966
        angle_between((1, 0, 0), (1, 0, 0)): 0.0
        angle_between((1, 0, 0), (-1, 0, 0)): 3.141592653589793
    """
    # Angle from the normalized dot product, which reproduces the
    # documented examples above.
    dot = sum(a * b for a, b in zip(vector1, vector2))
    norm_product = math.sqrt(sum(a * a for a in vector1)) * \
        math.sqrt(sum(b * b for b in vector2))
    # Clamp to [-1, 1] to guard against floating-point rounding.
    return math.acos(max(-1.0, min(1.0, dot / norm_product)))
224965990d6880ea5597b7e2522b24a9f0ef6179
16,558
import pkg_resources


def load_resource(resource_path):  # pragma: NO COVER
    """Gets the content of a resource"""
    resource_content = pkg_resources.resource_string(__name__, resource_path)
    return resource_content.decode('utf-8')
5afd4c6072f942c865f64dbceb22cf422903c573
16,560
def list_sample(collection, limit=3):
    """
    given an ordered collection (list, tuple, ...), return a string
    representation of the first limit items (or fewer), e.g.
    "itemA, itemB, itemC and 7 more"
    """
    ln = len(collection)
    if ln > 1:
        if ln <= limit:
            # everything fits: "itemA, itemB and itemC"
            return '%s and %s' % (', '.join(str(ws) for ws in collection[:-1]),
                                  collection[-1])
        return '%s and %d others' % (', '.join(str(ws) for ws in collection[:limit]),
                                     ln - limit)
    if ln > 0:
        return collection[0]
    return ''
3c0926e75fc58ce68ec1919c7b072b067c6749ce
16,567
def _round_down_to_multiple(num, divisor):
    """
    Round the number down to a multiple of the divisor.

    :param num:
    :param divisor:
    :return:
    """
    return num - (num % divisor)
aefb46ca963924ddbbe0d161e2278af0dbcd665d
16,568
def pull(cards, card):
    """
    Pulls a card from a hand and returns the new hand.

    Example:
    > pull([4,5,6,7,8], 6)
    [4,5,7,8]
    """
    rest = cards[:]
    rest.remove(card)
    return rest
8cbe3d0178ae886ee44704e07edfd7501216e699
16,570
import logging


def greenlet_exception_logger(logger, level=logging.CRITICAL):
    """
    Return a function that can be used as argument to Greenlet.link_exception()
    that will log the unhandled exception to the given logger.
    """
    def exception_handler(greenlet):
        logger.log(level, "Unhandled exception in greenlet: %s", greenlet,
                   exc_info=greenlet.exc_info)
    return exception_handler
98f413f5f8432214d051f306490014e6927562f2
16,577
def remove_whitespace_chars(text):
    """
    Remove unnecessary (trailing, double, etc.) whitespace characters from a
    piece of text.

    :param text: A piece of text.
    :return: Text without unnecessary whitespace.
    """
    return " ".join(text.split())
40640c421bf6e776001e8cfa443dbb2f7148d6f0
16,580
def is_function(obj):
    """
    :param obj: the object to check
    :return: whether the object is callable
    """
    return hasattr(obj, '__call__')
0e8c7121ad6477482d94286640e66a79e7d9b375
16,590
def get_dynamic_hasher_names(HMAC_KEYS):
    """
    Return base dynamic hasher names for each entry in HMAC_KEYS (we need to
    create one hasher class for each key). Names are sorted to make sure the
    HMAC_KEYS are tested in the correct order and the first one is always the
    first hasher name returned.
    """
    algo_name = lambda hmac_id: 'bcrypt{0}'.format(hmac_id.replace('-', '_'))
    return [algo_name(key) for key in sorted(HMAC_KEYS.keys(), reverse=True)]
f2243dfdf7c0f56afbdd366fa190d0736dd94323
16,594
def gap_frequency(pileup):
    """returns the frequency of gaps (n. gaps divided by total number of
    reads, including N)"""
    tot_pileup = pileup.sum(axis=0)
    return tot_pileup[4] / tot_pileup.sum(axis=0)
9f97e944f06d05dd97517ad80a92dcc235e51bc3
16,605
def scale_list(list_):
    """
    Returns a scaled list with the minimum value subtracted from each element
    of the corresponding list.

    Parameters
    ----------
    list_ : list
        Input list.

    Returns
    -------
    scaled_list : list
        Scaled list.

    Examples
    --------
    >>> list_ = [6, 3, 5, 11, 3, 2, 8, 6]
    >>> scale_list(list_)
    [4, 1, 3, 9, 1, 0, 6, 4]
    """
    minimum = min(list_)  # compute once rather than per element
    scaled_list = [i - minimum for i in list_]
    return scaled_list
ef64ae41ca223bbf6a4b1c8c535bd2072a53a7a1
16,608
def prob_mass_grey_from_ndvi(ndvi, old_min=0.1, old_max=0.7):
    """
    Calculates probability masses for grey from NDVI values

    :param ndvi:
    :param old_min:
    :param old_max:
    :return:
    """
    # Not Green belief
    if ndvi > old_max:
        return 0
    elif ndvi < old_min:
        return 1
    else:
        new_max = 1
        new_min = 0
        old_range = old_max - old_min
        new_range = new_max - new_min
        return 1 - (((ndvi - old_min) * new_range) / old_range) + new_min
d880c392cd5cdf20a08f8a6c6bdf8d2694824587
16,618
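A worked sketch of prob_mass_grey_from_ndvi above (NDVI inputs are illustrative): with the default bounds, values beyond the bounds clamp to 0 or 1, and values in between scale linearly, so an NDVI midway through the old range gives a mass of about 0.5.

prob_mass_grey_from_ndvi(0.8)   # 0 (above old_max: fully green)
prob_mass_grey_from_ndvi(0.05)  # 1 (below old_min: fully grey)
prob_mass_grey_from_ndvi(0.4)   # ~0.5 (midway between 0.1 and 0.7)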
import csv


def write_table_to_csv_file(file_name, lowfi_table):
    """Write the data from a LowFiTable to a .csv file."""
    try:
        with open(file_name, mode='w') as fp:
            csv_writer = csv.writer(fp, dialect=csv.excel, lineterminator='\n')
            csv_writer.writerows(lowfi_table.get_headings_and_data_as_list())
        return True
    except Exception:
        return False
9fbb1def797df95239b1aa5e1198b6fe9f7da26b
16,619
def _construct_name(date, n):
    """Helper method to construct a name including the directory path"""
    name = "".join((date, "-img-", "{:03d}".format(n), ".jpg"))
    return name
a1ecdbf6968216453c8cae08bdccf714fb7edde1
16,622
def hasValidKey(dict, key):
    """Return True if key is in dict and not None, False otherwise."""
    return key in dict and dict[key] is not None
438ca30f1b133be80389abf8304cd24b09fce1d8
16,624
def clang_find_attributes(node):
    """Finds attributes one level below the Clang node."""
    return [n for n in node.get_children() if n.kind.is_attribute()]
a18483d8f5b19ce1b66abdbef76af5a98e420e21
16,627
import six


def rows_from_table(table):
    """Given a tinyquery.Table, build an API-compatible rows object."""
    result_rows = []
    for i in six.moves.xrange(table.num_rows):
        field_values = [{'v': str(col.values[i])}
                        for col in table.columns.values()]
        result_rows.append({
            'f': field_values
        })
    return result_rows
e8aef81702eb4ce5a89a3c9bc4b0d3389c17ad54
16,628
import socket
import struct


def inet_aton(s):
    """Like `socket.inet_aton()` but returns an int."""
    packed = socket.inet_aton(s)
    return struct.unpack('!I', packed)[0]
965388ca49c9f8472fc00f08af810e81bc7f0ff1
16,629
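A usage sketch of inet_aton above: the dotted quad is unpacked big-endian, so 127.0.0.1 becomes 127*2**24 + 1.

>>> inet_aton('127.0.0.1')
2130706433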
from typing import Dict
from typing import List

import json


def _pluck_listen_ids(aws_event: Dict) -> List[str]:
    """Pluck the listen ids from a batched sqs event.

    >>> _pluck_listen_ids({'Records': [{'body': '{"listen_id": "5"}'}]})
    ['5']
    """
    message_bodies = [json.loads(record['body']) for record in aws_event['Records']]
    return [message_body['listen_id'] for message_body in message_bodies]
e7440915ab23207ae82d3cc3878037df7c8a00d1
16,632
def _unpack_index(i):
    """Unpack index and return exactly four elements.

    If index is more shallow than 4, return None for trailing dimensions.
    If index is deeper than 4, raise a KeyError.
    """
    if len(i) > 4:
        raise KeyError(
            "Tried to index history with {} indices but only "
            "4 indices are possible.".format(len(i)))
    # fill trailing indices with None
    i_e, k_e, i_b, k_b = i + tuple([None] * (4 - len(i)))
    return i_e, k_e, i_b, k_b
323a819107ac7c53c2b2abdabb46e573b620c7e9
16,633
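A usage sketch of _unpack_index above: shallow indices are padded with None on the right.

>>> _unpack_index((1, 2))
(1, 2, None, None)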
def does_intersect_rect(p, particles, padding, rect, is_3d=False):
    """
    Returns true if particle p is sufficiently close or outside the rectangle
    (in 2d) or cuboid (in 3d)

    Parameters
    ----------
    p : list
        Coordinates of center and radius of particle [x,y,z,r]
    particles : list
        List of center + radius of multiple particles. E.g. particles[0] is a
        list containing coordinates of center and radius.
    padding : float
        Minimum distance between circle boundaries
    rect : list
        Coordinates of left-bottom and right-top corner points of rectangle
        (2d) or cuboid (3d). E.g. [x1, y1, z1, x2, y2, z2]
    is_3d : bool
        True if we are dealing with cuboid

    Returns
    -------
    bool
        True if particle intersects or is near enough to the rectangle
    """
    if len(p) < 4:
        raise Exception('p = {} must have at least 4 elements'.format(p))
    if len(particles) == 0:
        raise Exception('particles = {} can not be empty'.format(particles))
    if padding < 0.:
        raise Exception('padding = {} can not be negative'.format(padding))
    if len(rect) < 6:
        raise Exception('rect = {} must have 6 elements'.format(rect))

    # Axis-aligned bounding box of the particle
    pr = [p[0] - p[3], p[1] - p[3], p[2], p[0] + p[3], p[1] + p[3], p[2]]
    if is_3d:
        pr[2] -= p[3]
        pr[5] += p[3]

    if pr[0] < rect[0] + padding or pr[1] < rect[1] + padding \
            or pr[3] > rect[3] - padding or pr[4] > rect[4] - padding:
        if is_3d:
            if pr[2] < rect[2] + padding or pr[5] > rect[5] - padding:
                return True
        else:
            return True
    return False
a3300c17c6f9bf3d8f47efac0c94a222f6ee34ef
16,638
def alphanumericp(c):
    """Returns true if character is an alphabetic character or a numeric
    character; otherwise, returns false.
    """
    return type(c) is str and (c.isalpha() or c.isnumeric())
f6870d55471e8a96237c56e916df55c0f81266d9
16,639
import warnings


def get_link_to_assembly(species_list):
    """
    Get the path for assembly download from NCBI

    Select the most up-to-date Complete Genome of each organism and
    return the FTP path for that one.
    -----------------------------------------------------------------
    Argument:
    species_list -- list of dictionaries obtained from the
        get_entries_for_species function
    Return:
    link_assembly: str, FTP path to the genome assembly page of the
        selected model, e.g.:
        ftp://ftp.ncbi.nlm.nih.gov/genomes/all/GCA/010/448/615/GCA_010448615.1_ASM1044861v1
    """
    genomes = []
    link_assembly = ''
    for i in species_list:
        if i['status'] == 'Complete Genome' and i['assembly'] not in ('', '-'):
            genomes.append(i)
    date_list = []
    if len(genomes) != 0:
        for i in genomes:
            if len(genomes) == 1:
                link_assembly += i['assembly']
            elif len(genomes) > 1:
                date_list.append(i['date'])
    else:
        # consider eliminating the options below: if there is no
        # genomic information, there isn't any cds_from_genomic file
        warnings.warn('There is no complete genome for this species')
    if len(date_list) != 0:
        latest = max(date_list)
        for i in species_list:
            if i['date'] == latest:
                link_assembly += i['assembly']
                # pick the first match in case more than one entry
                # has the same date
                break
    return link_assembly
50c086d714786c67335715f4005ecd3eb1338e55
16,640
def session(request):
    """Load the database session for a given aiohttp request

    Internally, it just returns the value that was given as cleanup context
    by :func:`krake.api.app.db_session`.

    Args:
        request (aiohttp.web.Request): HTTP request

    Returns:
        krake.database.Session: Database session for the given request
    """
    return request.app["db"]
897a3fc517d4f4a0773617769c60b9dc8fd29056
16,641
from typing import Optional


def to_safe_str_or_none(value: Optional[str]) -> Optional[str]:
    """Convert input to cleaned string or None."""
    if value is None:
        return None
    v = str(value.strip()).replace('\r', '').replace('\n', '')
    return v or None
2946183f58aa51deb4deeb450296af95ca41f72e
16,648
def calculate_gc(seq):
    """
    Returns percentage of G and C nucleotides in a DNA sequence.
    """
    gc = 0
    for base in seq:
        # only G/C bases increment the count; other bases are skipped
        if base in "GCgc":
            gc += 1
    return gc / len(seq) * 100
51048f70febf7309da1d2b4b8946315bdc31e939
16,651
def rename_coords_to_lon_and_lat(ds):
    """Rename Dataset spatial coord names to: lat, lon"""
    if 'latitude' in ds.coords:
        ds = ds.rename({'latitude': 'lat'})
    if 'longitude' in ds.coords:
        ds = ds.rename({'longitude': 'lon'})
    elif 'long' in ds.coords:
        ds = ds.rename({'long': 'lon'})
    if 'z' in ds.coords:
        ds = ds.drop('z').squeeze()
    return ds
8ba286e441f2a32a96fbbddc5c1112a6ed890f84
16,652
def mean(list):
    """
    Given a list or tuple, will return the mean.

    Usage: mean(list)
    """
    sum = 0
    for item in list:
        sum += item
    return sum / len(list)
77d88f7386b53ab79eddfa459fa9c8f907961a9e
16,653
def read_pos_data(filename):
    """
    Read data from file which looks like (sec, nsec, x, y, z)

    0 8000000 -14 0 0.149843
    1 12000000 -13.9997 -1.6e-05 0.117777
    2 16000000 -13.9997 -1.9e-05 0.117841
    """
    arr = []
    for line in open(filename):
        sec, nsec, x, y, z = [float(a) for a in line.split()]
        arr.append((sec + nsec / 1_000_000_000, (x, y, z)))
    return arr
8f1a148e44033184cbd92cbb1f00de7e7d21e73f
16,654
def decode_response_version_from_config(confbytes: bytes) -> str:
    """Decode the string version from the bytearray response from Ledger device"""
    return "{}.{}.{}".format(
        confbytes[1],
        confbytes[2],
        confbytes[3],
    )
66dc0b71b2c9a22ca8198fb2a5ecbe69a7a0871b
16,656
import re
import json


def read_json(filename):
    """
    Comments in the form of

        # rest of line

    are stripped from json before parsing. Use like this::

        import pprint
        pprint.pprint(read_json(sys.argv[1]))
    """
    with open(filename) as f:
        content = ''
        # weed out comments
        for line in f.readlines():
            content += re.sub(r'^\s*#.*', '', line)
        return json.loads(content)
8bd89325e8f3b486bb13790fc4c827427a435515
16,657
def cut_clockwise(positive_offset: bool, spindle_clockwise: bool, climb: bool):
    """
    If all 3 are true, then cut must be done clockwise.
    Changing one to false, the cut must be done counter-clockwise.
    Changing two to false, the cut must be done clockwise.
    Changing all three to false, the cut must be done counter-clockwise.
    You get the idea..

    :param positive_offset: Positive offset = outside cut, negative offset = inside cut
    :param spindle_clockwise: Spindle spinning clockwise (top->down)
    :param climb: climb milling (vs conventional milling)
    :return: cut clockwise (or counter-clockwise)
    """
    return bool((positive_offset + spindle_clockwise + climb) % 2)
62a5c70ce723e8fc26e1cc9d8c0e7b167c031dc5
16,658
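A worked sketch of the parity rule in cut_clockwise above: an odd number of True flags means clockwise, an even number means counter-clockwise.

>>> cut_clockwise(True, True, True)
True
>>> cut_clockwise(False, True, True)
False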
import pickle


def read_binary_file(file_name):
    """reads binary file and returns the content"""
    with open(str(file_name), "rb") as bin_file:
        obj = pickle.load(bin_file)
    return obj
4d350edb97310df963fcd2bf3af67deb184f1cdb
16,659
from typing import Sequence
from typing import Dict


def create_fills_query(start_datetime: str, end_datetime: str,
                       uuids: Sequence[str]) -> Dict:
    """Create a GetFills dictionary request.

    Args:
        start_datetime: UTC datetime as '%Y-%m-%dTHH:MM:SS'
        end_datetime: UTC datetime as '%Y-%m-%dTHH:MM:SS'
        uuids: List of user uuids to get fills associated with

    Returns:
        A dictionary representation of a blpapi.Request
    """
    return {"GetFills": {"FromDateTime": start_datetime,
                         "ToDateTime": end_datetime,
                         "Scope": {"Uuids": uuids}}}
beb383badbe60d39f72185a8633155f797f98f4f
16,661
def get_request_id(request_json):
    """Get the request_id from the request"""
    request_id = request_json['requestInfo'].get('requestId')
    if not request_id:
        request_id = request_json['requestInfo'].get('requestID')
    return request_id
68b3e9e8a15d84d1042173b8fc8c480f996d616a
16,664
def parse_playlist_uri(uri):
    """
    Takes a playlist uri and splits it to (user_id, playlist_id)
    """
    # Sample: spotify:user:lmljoe:playlist:0DXoY83tBvgWkd8QH49yAI
    playlistparts = uri.split(':')
    if len(playlistparts) != 5:
        print('Invalid playlist id')
        exit()
    user_id = playlistparts[2]
    playlist_id = playlistparts[4]
    return user_id, playlist_id
23e7f36d4008f0e361a93e81289f302bf15cee81
16,665
import inspect
import logging


def verify_parameters(code, kwargs, err_message):
    """
    Used to verify that the parameters in kwargs match the signature of the code.

    :param code: The code fragment that has the signature to check.
    :param kwargs: The kwargs to look for
    :param err_message: An error message to show if the signature doesn't match.
    :return:
    """
    params = inspect.signature(code).parameters
    verified = True
    # The kwargs should all be in the function. Anything left should have a default
    param_keys = set(params.keys())
    param_keys.discard('self')
    # Make sure they have all the ones we asked for.
    missing = set(kwargs.keys()) - set(param_keys)
    for param_name in missing:
        logging.error(f"Missing param '{param_name}' on function {err_message}")
        verified = False
    remaining = set(param_keys) - set(kwargs.keys())
    for param_name in remaining:
        if params[param_name].default == inspect.Parameter.empty:
            logging.error(f"Param '{param_name}' not passed for {err_message}")
            verified = False
    return verified
cd3c3542c41bb7ba0d3f8d7f250f44d743acb0a9
16,667
def merge_dfs_by_index(df1, df2):
    """
    Merge two pandas dataframes index-by-index.

    The dataframes have to share the same index name. Shared indexes will be
    merged without data loss. In case of conflicting entries a ValueError is
    raised. The merge operation is symmetric and does not depend on the
    order of df1 and df2.

    Parameters
    ----------
    df1: dataframe
        Pandas dataframe to be extended
    df2: dataframe
        Pandas dataframe used for extension

    Returns
    -------
    dataframe:
        The merged dataframe

    Raises
    ------
    ValueError in case of incompatible index names or values
    """
    if df1.index.name != df2.index.name:
        raise ValueError('Dataframes have incompatible indexes: '
                         f'{df1.index.name} != {df2.index.name}.')
    # check for contradicting values by comparing A+B with B+A
    left_combine = df1.combine_first(df2)
    right_combine = df2.combine_first(df1)
    # ignoring dtypes when checking equality
    if not left_combine.astype(object).equals(right_combine.astype(object)):
        raise ValueError('Dataframes have incompatible values: '
                         f'{left_combine.compare(right_combine)}')
    return right_combine
fe1558403ab2ee7a01b034788edd6ac413f77eaf
16,674
def bytes_to_little_int(data: bytearray) -> int:
    """Convert bytes to little int."""
    return int.from_bytes(data, byteorder="little", signed=False)
bdca6a59b4036cce1bd9bc59a9096556d87b257b
16,675
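A usage sketch of bytes_to_little_int above: the least significant byte comes first, so [0x01, 0x02] decodes to 0x0201.

>>> bytes_to_little_int(bytearray([0x01, 0x02]))
513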
def resolve_translation(instance, info, language_code):
    """Gets translation object from instance based on language code."""
    return instance.translations.filter(language_code=language_code).first()
79737c123e09760fb1514bbfad7d73c385a4309a
16,676
def tonumber(v):
    """
    Convert a value to int if it's an int, otherwise a float.
    """
    try:
        v = int(v)
    except ValueError:
        v = float(v)
    return v
8b52ac3385b3ffc721af523799ef3a6da4e29060
16,682
def _param(param, value):
    """create 'parameter=value'"""
    return "{0}={1}".format(param, value)
a64cedf88c20b774ffef23bb7caded4ed7975143
16,683
def array_madness(arr1: list, arr2: list) -> bool:
    """
    This function returns True if the sum of the squares of each element in
    arr1 is strictly greater than the sum of the cubes of each element in arr2.
    """
    if len(arr1) >= 1 and len(arr2) >= 1:
        return sum([i**2 for i in arr1]) > sum([i**3 for i in arr2])
    return False
67f4d2ca7bb51d0134336993c88928eca29b8383
16,684
def get_bcrypt_salt(hashed):
    """
    Get the salt from a bcrypt hashed string
    """
    return hashed[:29]
ce7f632ddb832548841b0daead4e8176899ac21d
16,685