Dataset columns: content (string, 39 to 14.9k chars), sha1 (string, 40 chars), id (int64, 0 to 710k).
def find_bands(bands, target_avg, target_range, min_shows):
    """
    Searches a dictionary of bands, with band names as keys and lists of
    competition scores as values, for bands whose average score is within
    the target range and that have performed the minimum number of shows.
    Returns a list of bands that meet the search criteria.

    Parameters:
        bands: Dictionary with band names as keys and lists of scores as values.
        target_avg: Tuple containing the average to look for and the number of
            scores to average.
        target_range: Allowed distance from the target average.
        min_shows: Minimum number of shows to be eligible.

    Returns:
        List of bands that meet the search criteria.

    >>> DCI = {'Blue Devils': [98.2, 97.1, 99.1, 97.3, 98.2], \
               'Blue Coats': [98, 96.5, 97.2, 93, 92.1, 92, 97.4], \
               'Carolina Crown': [75.7, 82.8, 86.1, 98.2], \
               'The Cadets': [96.1, 93.4, 81, 78, 57.9, 86, 71.2, 35.5], \
               'Mandarins': [89.3, 88.1, 85.6, 83.8, 79.1, 88.4, 75.7], \
               'Little Rocks': [42], \
               'Logan Colts': [98.2, 84.4, 69.2, 42, 84]}
    >>> find_bands(DCI, (0, 10), 30, 2)
    []
    >>> find_bands(DCI, (90, 5), 5, 7)
    ['Mandarins']
    >>> find_bands(DCI, (70, 8), 10, 5)
    ['The Cadets', 'Logan Colts']
    >>> find_bands(DCI, (95, 3), 5, 4)
    ['Blue Devils', 'Blue Coats', 'The Cadets']

    # My doctests
    >>> find_bands(DCI, (42, 10), 1, 1)
    ['Little Rocks']
    >>> find_bands(DCI, (87, 10), 5, 5)
    ['Mandarins']
    >>> DCI2 = {'UCSD': [100, 99, 100, 100, 100, 100], \
                'UCLA': [50, 49, 100, 100, 100], \
                'UCD': [90, 90, 87, 45, 79]}
    >>> find_bands(DCI2, (95, 3), 5, 4)
    ['UCSD']
    >>> find_bands(DCI2, (75, 5), 10, 4)
    ['UCLA', 'UCD']
    """
    lower_bound = target_avg[0] - target_range
    upper_bound = target_avg[0] + target_range
    noted_scores = target_avg[1]
    score_index = 1
    in_range = lambda avg: lower_bound <= avg <= upper_bound
    score_avg = lambda scores, kept_scores: (
        sum(scores) / len(scores) if len(scores) <= kept_scores
        else sum(scores[0:kept_scores]) / kept_scores)
    eligible = filter(lambda band: len(band[score_index]) >= min_shows,
                      bands.items())
    matching = filter(
        lambda band: in_range(score_avg(band[score_index], noted_scores)),
        eligible)
    return list(map(lambda band: band[0], matching))
1b2b93f0a1d4236ad62102205606eff8afb3802a
704,428
import torch


def ones(shape, dtype=None):
    """Wrapper of `torch.ones`.

    Parameters
    ----------
    shape : tuple of ints
        Shape of output tensor.
    dtype : data-type, optional
        Data type of output tensor, by default None
    """
    return torch.ones(shape, dtype=dtype)
a234936baa16c8efdc63e903d8455895ab7f2f0c
704,440
import asyncio


def mock_coro(return_value=None, exception=None):
    """Return a coro that returns a value or raises an exception."""
    fut = asyncio.Future()
    if exception is not None:
        fut.set_exception(exception)
    else:
        fut.set_result(return_value)
    return fut
d06d037bab143e288534e3e7e98da259f7c1cefc
704,448
import requests
import json


def request_records(request_params):
    """
    Download utility rate records from USURDB given a set of request
    parameters.

    :param request_params: dictionary with request parameter names as keys
        and the parameter values
    :return: the decoded JSON records
    """
    records = requests.get(
        "https://api.openei.org/utility_rates?", params=request_params
    )
    request_content = records.content

    # strict=False prevents an error (control characters are allowed inside
    # strings)
    json_records = json.loads(request_content, strict=False)

    return json_records
7323657186cc87a291e47c3a71cd2e81b4ec8a73
704,449
def calc_glass_constants(nd, nF, nC, *partials):
    """Given central, blue and red refractive indices, calculate Vd and PFd.

    Args:
        nd, nF, nC: refractive indices at central, short and long wavelengths
        partials (tuple): if present, 2 ref indxs, n4 and n5, wl4 < wl5

    Returns:
        V-number and relative partial dispersion from F to d

        If `partials` is present, the return values include the central
        wavelength index and the relative partial dispersion between the 2
        refractive indices provided from `partials`.
    """
    dFC = nF - nC
    vd = (nd - 1.0) / dFC
    PFd = (nF - nd) / dFC
    if len(partials) == 2:
        n4, n5 = partials
        P45 = (n4 - n5) / dFC
        return nd, vd, PFd, P45
    return vd, PFd
f347b6caf167c19451bb2f03e88b5846c6873250
704,451
def in_range(x, a1, a2):
    """Check if (modulo 360) x is in the range a1...a2.

    a1 must be < a2.
    """
    a1 %= 360.
    a2 %= 360.
    if a1 <= a2:  # "normal" range (not including 0)
        return a1 <= x <= a2
    # "jumping" range (around 0)
    return a1 <= x or x <= a2
8855ea29e44c546d55122c7c6e4878b44a3bc272
704,455
def add_common_arguments(parser):
    """Populate the given argparse.ArgumentParser with arguments.

    This function can be used to make the definition of these argparse
    arguments reusable in other modules and avoid the duplication of these
    definitions among the executable scripts.

    The following arguments are added to the parser:
    - **...** (...): ...

    Parameters
    ----------
    parser : argparse.ArgumentParser
        The parser to populate.

    Returns
    -------
    argparse.ArgumentParser
        Return the populated ArgumentParser object.
    """
    return parser
c8e3eba16c33f0fcf12caf3a31b281dcee858648
704,457
def attr_names(obj):
    """
    Determine the names of user-defined attributes of the given
    SimpleNamespace object.

    Source: https://stackoverflow.com/a/27532110

    :return: A list of strings.
    """
    return sorted(obj.__dict__)
ecbc0321d0796925341731df303c48ea911fcf57
704,459
import random


def weighted_choice(choices):
    """
    Pick a value off a weighted list of choices.

    :param list choices: Each item is a tuple of choice and weight
    :return: the chosen value
    """
    total = sum(weight for choice, weight in choices)
    selection = random.uniform(0, total)
    counter = 0
    for choice, weight in choices:
        if counter + weight > selection:
            return choice
        counter += weight
    assert False, "Shouldn't get here"
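A quick sketch of how the sampler behaves; the choice/weight pairs below are made up for illustration. 'red' carries half the total weight, so it should win roughly half of the draws.

choices = [('red', 5), ('green', 3), ('blue', 2)]  # hypothetical weights
picks = [weighted_choice(choices) for _ in range(1000)]
# 'red' is expected in roughly 500 of the 1000 picks, since 5 / (5+3+2) = 0.5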
c32ff27b9892bb88db2928ec22c4ede644f6792c
704,461
def get_layout(data, width_limit):
    """A row of a chart can be dissected into the three components below:

    1. Label region ('label1'): fixed length (set to max label length + 1)
    2. Intermediate region (' | '): 3 characters
    3. Bar region ('▇ or '): variable length

    This function first calculates the width of the label region (1), then
    computes the longest possible width of the bar region (3). It returns the
    layout of the chart, described by the widths of each region. The total
    width of the chart will not exceed width_limit - 15 characters, just for
    an aesthetic reason.
    """
    labels = [d[0] for d in data]
    label_width = len(max(labels, key=lambda label: len(label))) + 1
    intermediate_width = 3
    bar_width = (width_limit - 15) - (label_width + intermediate_width)
    return label_width, bar_width
dbb8bfa2c537f3b05713bf3abdc106ec74bc7ac9
704,466
def get_number_of_classes(model_config):
    """Returns the number of classes for a detection model.

    Args:
        model_config: A model_pb2.DetectionModel.

    Returns:
        Number of classes.

    Raises:
        ValueError: If the model type is not recognized.
    """
    meta_architecture = model_config.WhichOneof("model")
    meta_architecture_config = getattr(model_config, meta_architecture)

    if hasattr(meta_architecture_config, "num_classes"):
        return meta_architecture_config.num_classes
    else:
        raise ValueError("{} does not have num_classes.".format(meta_architecture))
d87605b6025e1bc78c7436affe740f7591a99f68
704,467
def shorten_build_target(build_target: str) -> str:
    """Returns a shortened version of the build target."""
    if build_target == '//chrome/android:chrome_java':
        return 'chrome_java'
    return build_target.replace('//chrome/browser/', '//c/b/')
03af53f1fcacae9a4e0309053075806d65275ce9
704,468
from typing import Tuple
import random


def draw_two(max_n: int) -> Tuple[int, int]:
    """Draw two different ints given max (mod max)."""
    i = random.randint(0, max_n)
    j = (i + random.randint(1, max_n - 1)) % max_n
    return i, j
9ebb09158c296998c39a2c4e8fc7a18456428fc6
704,471
def versionPropertiesDictionary(sql_row_list):
    """
    versionPropertiesDictionary(sql_row_list) transforms a row obtained via
    an SQL request (a list) into a dictionary.
    """
    properties_dictionary = {
        "id": sql_row_list[0],
        "model_id": sql_row_list[1],
        "version": sql_row_list[2],
        "metadata": sql_row_list[3],
        "commit_comment": sql_row_list[4],
        "created_timestamp": sql_row_list[5],
    }
    return properties_dictionary
ab8cdd166bf8a187945c44fd416c3a4cf4634d02
704,472
def ms2str(v):
    """
    Convert a time in milliseconds to a time string.

    Arguments:
        v: a time in milliseconds.

    Returns:
        A string in the format HH:MM:SS,mmm.
    """
    v, ms = divmod(v, 1000)
    v, s = divmod(v, 60)
    h, m = divmod(v, 60)
    return f"{h:02d}:{m:02d}:{s:02d},{ms:03d}"
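The output matches the SRT subtitle timestamp format. A worked example with a made-up input:

# 1 hour, 1 minute, 1 second and 500 ms:
assert ms2str(3661500) == "01:01:01,500"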
5d50aa072584e5ad17d8bd3d08b0b0813aced819
704,475
def is_index(file_name: str) -> bool:
    """Determines if a filename is a proper index name."""
    return file_name == "index"
7beb5779b61e25b4467eb7964478c78d44f28931
704,480
def search_tag(resource_info, tag_key):
    """Search tag in tag list by given tag key."""
    return next(
        (tag["Value"] for tag in resource_info.get("Tags", []) if tag["Key"] == tag_key),
        None,
    )
5945631a3de7032c62c493369e82dd330ef2bc47
704,483
import six


def _expand_expected_codes(codes):
    """Expand the expected code string into a set of codes.

    200-204 -> 200, 201, 202, 203, 204
    200, 203 -> 200, 203
    """
    retval = set()
    for code in codes.replace(',', ' ').split(' '):
        code = code.strip()
        if not code:
            continue
        elif '-' in code:
            low, hi = code.split('-')[:2]
            retval.update(
                str(i) for i in six.moves.xrange(int(low), int(hi) + 1))
        else:
            retval.add(code)
    return retval
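A worked example with illustrative input. Note that ranges are inclusive on both ends and that the codes come back as a set of strings, not ints:

assert _expand_expected_codes('200-202, 204') == {'200', '201', '202', '204'}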
52056db88bf14352d4cda2411f25855457defbd7
704,485
import torch


def make_complex_matrix(x, y):
    """A function that takes two tensors (a REAL (x) and IMAGINARY part (y))
    and returns the combined complex tensor.

    :param x: The real part of your matrix.
    :type x: torch.doubleTensor
    :param y: The imaginary part of your matrix.
    :type y: torch.doubleTensor

    :raises ValueError: This function will not execute if x and y do not
        have the same dimension.

    :returns: The full tensor with the real and imaginary parts separated
        as previously mentioned.
    :rtype: torch.doubleTensor
    """
    if x.size()[0] != y.size()[0] or x.size()[1] != y.size()[1]:
        raise ValueError(
            'Real and imaginary parts do not have the same dimension.')

    z = torch.zeros(2, x.size()[0], x.size()[1], dtype=torch.double)
    z[0] = x
    z[1] = y
    return z
faae031b3aa6f4972c8f558f6b66e33d416dec71
704,489
from typing import SupportsAbs
import math


def is_unit(v: SupportsAbs[float]) -> bool:
    """'True' if the magnitude of 'v' is close to 1."""
    return math.isclose(abs(v), 1.0)
0b31da2e5a3bb6ce49705d5b2a36d3270cc5d802
704,491
async def async_unload_entry(hass, config_entry):
    """Handle removal of an entry."""
    return True
28005ececbf0c43c562cbaf7a2b8aceb12ce3e41
704,496
import json


def is_valid_json(text: str) -> bool:
    """Is this text valid JSON?"""
    try:
        json.loads(text)
        return True
    except json.JSONDecodeError:
        return False
3013210bafd5c26cacb13e9d3f4b1b708185848b
704,497
from pathlib import Path


def get_config_path(root: str, idiom: str) -> Path:
    """Get path to idiom config.

    Arguments:
        root {str} -- root directory of idiom config
        idiom {str} -- basename of idiom config

    Returns:
        Path -- pathlib.Path to the idiom's JSON config file
    """
    root_path = Path(root)
    file_name = '{}.json'.format(idiom)
    return root_path.joinpath(file_name)
86d65f11fbd1dfb8aca13a98e129b085158d2aff
704,498
def minor_min_width(G):
    """Computes a lower bound for the treewidth of graph G.

    Parameters
    ----------
    G : NetworkX graph
        The graph on which to compute a lower bound on the treewidth.

    Returns
    -------
    lb : int
        A lower bound on the treewidth.

    Examples
    --------
    This example computes a lower bound for the treewidth of the
    :math:`K_7` complete graph.

    >>> K_7 = nx.complete_graph(7)
    >>> dnx.minor_min_width(K_7)
    6

    References
    ----------
    Based on the algorithm presented in [GD]_
    """
    # we need only deal with the adjacency structure of G. We will also
    # be manipulating it directly so let's go ahead and make a new one
    adj = {v: set(u for u in G[v] if u != v) for v in G}

    lb = 0  # lower bound on treewidth
    while len(adj) > 1:
        # get the node with the smallest degree
        v = min(adj, key=lambda v: len(adj[v]))

        # find the vertex u such that the degree of u is minimal in the
        # neighborhood of v
        neighbors = adj[v]
        if not neighbors:
            # if v is a singleton, then we can just delete it
            del adj[v]
            continue

        def neighborhood_degree(u):
            Gu = adj[u]
            return sum(w in Gu for w in neighbors)

        u = min(neighbors, key=neighborhood_degree)

        # update the lower bound
        new_lb = len(adj[v])
        if new_lb > lb:
            lb = new_lb

        # contract the edge between u, v
        adj[v] = adj[v].union(n for n in adj[u] if n != v)
        for n in adj[v]:
            adj[n].add(v)
        for n in adj[u]:
            adj[n].discard(u)
        del adj[u]

    return lb
649ea7fe0a55ec5289b04b761ea1633c2a258000
704,499
def normalize_email(email):
    """Normalizes the given email address.

    In the current implementation it is converted to lower case. If the
    given email is None, an empty string is returned.
    """
    email = email or ''
    return email.lower()
6ee68f9125eef522498c7299a6e793ba11602ced
704,500
def parse_read_options(form, prefix=''):
    """Extract read options from form data.

    Arguments:
        form (obj): Form object

    Keyword Arguments:
        prefix (str): prefix for the form fields (default: {''})

    Returns:
        (dict): Read options key - value dictionary.
    """
    read_options = {
        'encoding': getattr(form, prefix + 'encoding').data,
        'delimiter': getattr(form, prefix + 'delimiter').data,
    }
    geom = getattr(form, prefix + 'geom')
    lat = getattr(form, prefix + 'lat')
    lon = getattr(form, prefix + 'lon')

    if geom.data != '':
        read_options['geom'] = geom.data
    elif lat.data != '' and lon.data != '':
        read_options['lat'] = lat.data
        read_options['lon'] = lon.data

    return read_options
660e836172015999fe74610dffc331d2b37991c3
704,501
import operator
import math


def unit_vector(vec1, vec2):
    """Return a unit vector pointing from vec1 to vec2."""
    # Materialize the difference as a list so it can be iterated twice;
    # in Python 3 a bare map() iterator would be consumed by the sum()
    # below, leaving nothing for the final scaling step.
    diff_vector = list(map(operator.sub, vec2, vec1))
    scale_factor = math.sqrt(sum(x ** 2 for x in diff_vector))
    if scale_factor == 0:
        scale_factor = 1  # We don't have an actual vector, it has zero length
    return [x / scale_factor for x in diff_vector]
79e2cff8970c97d6e5db5259801c58f82075b1a2
704,506
import re


def get_info(prefix, string):
    """
    :param prefix: the regex to match the info you are trying to obtain
    :param string: the string where the info is contained (can have newline
        characters)
    :return: the first match within the string, or None if there is no match
    """
    info = None
    # find and return the match based on the prefix, if there is one
    # (not empty)
    matches = re.findall(prefix, string)
    if len(matches) > 0:
        info = matches[0]
    return info
ed41100910df8ec3e0060ecd1196fb8cc1060329
704,516
def is_nonnegative_length(G, l):
    """
    Checks whether a length function, defined on the arcs, satisfies the
    non-negativity condition.

    Args:
        G: An instance of Graph class.
        l: A dictionary that defines a length function on the edge set.

    Returns:
        A boolean, True if the length function satisfies the non-negativity
        condition, False otherwise.
    """
    assert G.directed

    # Non-negativity condition
    for e in G.aristas:
        if l[e] < 0:
            return False
    return True
c99aaf07b65f9a192b6421b4b3ccf73c98917500
704,521
def bytes_to_int(b: bytes) -> int:
    """
    Convert bytes to a big-endian unsigned int.

    :param b: The bytes to be converted.
    :return: The int.
    """
    return int.from_bytes(bytes=b, byteorder='big', signed=False)
eb08ae0b2663047557b8f102c6c6ed565aae8044
704,526
def pyav_decode_stream(
    container, start_pts, end_pts, stream, stream_name, buffer_size=0
):
    """
    Decode the video with the PyAV decoder.

    Args:
        container (container): PyAV container.
        start_pts (int): the starting Presentation TimeStamp to fetch the
            video frames.
        end_pts (int): the ending Presentation TimeStamp of the decoded frames.
        stream (stream): PyAV stream.
        stream_name (dict): a dictionary of streams. For example, {"video": 0}
            means video stream at stream index 0.
        buffer_size (int): number of additional frames to decode beyond end_pts.

    Returns:
        result (list): list of frames decoded.
        max_pts (int): max Presentation TimeStamp of the video sequence.
    """
    # Seeking in the stream is imprecise. Thus, seek to an earlier PTS by a
    # margin pts.
    margin = 1024
    seek_offset = max(start_pts - margin, 0)
    container.seek(seek_offset, any_frame=False, backward=True, stream=stream)

    frames = {}
    buffer_count = 0
    max_pts = 0
    for frame in container.decode(**stream_name):
        max_pts = max(max_pts, frame.pts)
        if frame.pts < start_pts:
            continue
        if frame.pts <= end_pts:
            frames[frame.pts] = frame
        else:
            buffer_count += 1
            frames[frame.pts] = frame
            if buffer_count >= buffer_size:
                break
    result = [frames[pts] for pts in sorted(frames)]
    return result, max_pts
5b012899c047dcd3ee90d793c68ebdd1d2f413c1
704,530
def crop_images(x, y, w, h, *args):
    """
    Crops all the images passed as parameters using the box coordinates
    passed.
    """
    assert len(args) > 0, "At least 1 image needed."
    cropped = []
    for img in args:
        cropped.append(img[x : x + h, y : y + w])
    return cropped
e8f78246c0bfeb3d370b8fe01e264b2f7e0e1c49
704,537
import time
import json


def WriteResultToJSONFile(test_suites, results, json_path):
    """Aggregate a list of unittest result objects and write to a file as JSON.

    This takes a list of result objects from one or more runs (for retry
    purposes) of Python unittest tests; aggregates the list by appending each
    test result from each run and writes to a file in the correct format for
    the --isolated-script-test-output argument passed to test isolates.

    Args:
        test_suites: a list of unittest.TestSuite that were run to get the
            list of result objects; each test_suite contains the tests run
            and is iterated to get all test cases ran.
        results: a list of unittest.TextTestResult objects returned from
            running unittest tests.
        json_path: desired path to the JSON file of results.
    """
    output = {
        'interrupted': False,
        'num_failures_by_type': {},
        'path_delimiter': '.',
        'seconds_since_epoch': time.time(),
        'tests': {},
        'version': 3,
    }

    def initialize(test_suite):
        for test in test_suite:
            if test.id() not in output['tests']:
                output['tests'][test.id()] = {
                    'expected': 'PASS',
                    'actual': []
                }

    for test_suite in test_suites:
        initialize(test_suite)

    def get_pass_fail(test_suite, result):
        success = []
        fail = []
        for failure in result.failures + result.errors:
            fail.append(failure[0].id())
        for test in test_suite:
            if test.id() not in fail:
                success.append(test.id())
        return {
            'success': success,
            'fail': fail,
        }

    for test_suite, result in zip(test_suites, results):
        pass_fail = get_pass_fail(test_suite, result)
        for s in pass_fail['success']:
            output['tests'][s]['actual'].append('PASS')
        for f in pass_fail['fail']:
            output['tests'][f]['actual'].append('FAIL')

    num_fails = 0
    # The original used dict.itervalues(), which is Python 2 only;
    # values() behaves the same here and also works on Python 3.
    for test_result in output['tests'].values():
        if test_result['actual'][-1] == 'FAIL':
            num_fails += 1
            test_result['is_unexpected'] = True
        test_result['actual'] = ' '.join(test_result['actual'])

    output['num_failures_by_type']['FAIL'] = num_fails
    output['num_failures_by_type']['PASS'] = len(output['tests']) - num_fails

    with open(json_path, 'w') as script_out_file:
        json.dump(output, script_out_file)
        script_out_file.write('\n')
cb53b65bf5c8ceb1d0695e38c4ebeedd4916fe14
704,538
def split_person_name(name):
    """
    A helper function. Split a person name into a first name and a last name.

    Example.
    >>> split_person_name("Filip Oliver Klimoszek")
    ('Filip Oliver', 'Klimoszek')
    >>> split_person_name("Klimoszek")
    ('', 'Klimoszek')
    """
    parts = name.split(" ")
    return " ".join(parts[:-1]), parts[-1]
86b7c7cec1e7772437f41f11437834cfa34051c7
704,539
def format_price(raw_price):
    """Formats the price to account for Best Buy's raw price format.

    Args:
        raw_price (string): Best Buy's price format (ex: $5999 is $59.99)

    Returns:
        string: The formatted price
    """
    # Insert a decimal point before the last two digits.
    formatted_price = (raw_price[:len(raw_price) - 2] + "."
                       + raw_price[len(raw_price) - 2:])
    return formatted_price
a3b0adc94421334c3f1c4fe947329d329e68990e
704,541
def replicated_data(index):
    """Whether data[index] is a replicated data item"""
    return index % 2 == 0
26223e305d94be6e092980c0eb578e138cfa2840
704,543
def get_user_roles_common(user):
    """Return the user's role as saved in the db."""
    return user.role
cf25f029325e545f5d7685e6ac19e0e09105d65a
704,544
def partial_es(Y_idx, X_idx, pred, data_in, epsilon=0.0001):
    """
    The analysis of the single-variable dependency in the neural network.

    The exact partial-derivative calculation may be highly time consuming,
    so this estimated (finite-difference) calculation can be used instead.

    Args:
        Y_idx: index of Y to access the target variable of interest
        X_idx: index of X to access the independent variable of the
            neural network
        pred: the specified predictive model
        data_in: the specified data of the input layer
        epsilon: step size of the finite difference

    Returns:
        The first-order derivative of Y on X for the specified X index
        and Y index
    """
    eps = epsilon
    y1 = pred(data_in)
    data_in[X_idx] += eps
    y2 = pred(data_in)
    # Undo the in-place perturbation so the caller's data is unchanged.
    data_in[X_idx] -= eps
    return (y2[Y_idx] - y1[Y_idx]) / eps
12186469b27bebea4735372e2b45f463bbfbaff1
704,545
def isfloat(s):
    """
    Checks whether the string ``s`` represents a float.

    :param s: the candidate string to test
    :type s: ``str``
    :return: True if s is the string representation of a number
    :rtype: ``bool``
    """
    try:
        float(s)
        return True
    except (ValueError, TypeError):
        return False
2233d0a06b9ff0be74f76ef2fce31c816f68584c
704,547
def percentage_to_float(x):
    """Convert a string representation of a percentage to float.

    >>> percentage_to_float('55%')
    0.55

    Args:
        x: String representation of a percentage

    Returns:
        float: Percentage in decimal form
    """
    return float(x.strip('%')) / 100
6c1aeac99278963d3dd207d515e72b6e1e79f09f
704,548
# OrderedDict must come from collections (not typing) to be instantiated.
from collections import OrderedDict
import inspect


def build_paramDict(cur_func):
    """
    This function iterates through all inputs of a function, and saves the
    default argument names and values into a dictionary.

    If any of the default arguments are functions themselves, then recursively
    (depth-first) adds an extra field to the dictionary, named
    <funcName + "_params">, that contains its inputs and arguments.

    The output of this function can then be passed as a "kwargs" object to the
    highest level function, which will then pass the parameter values to the
    lower dictionary levels appropriately.
    """
    paramDict = OrderedDict()
    allArgs = inspect.getfullargspec(cur_func)

    # Check if there are any default parameters; if not, just return the
    # empty dict
    if allArgs.defaults is None:
        return paramDict

    for argname, argval in zip(allArgs.args[-len(allArgs.defaults):],
                               allArgs.defaults):
        # Save the default argument
        paramDict[argname] = argval
        # If the default argument is a function, inspect it further
        if callable(argval):
            paramDict[argname + "_params"] = build_paramDict(argval)

    return paramDict
b62daf5ffe7b9211d898d26dc754875459dbe1ba
704,551
def get_channel_members_names(channel):
    """Returns a list of all members of a channel.

    If the member has a nickname, the nickname is used instead of their name,
    otherwise their name is used.
    """
    names = []
    for member in channel.members:
        if member.nick is None:
            names.append(member.name)
        else:
            names.append(member.nick)
    return names
955ea4013841fe8aac52f0474a65e221795db571
704,553
def compress_vertex_list(individual_vertex: list) -> list:
    """
    Given a list of vertices that should not be fillet'd, search for a range
    and make them one compressed list. If the vertex is a point and not a
    line segment, the returned tuple's start and end are the same index.

    Args:
        individual_vertex (list): List of UNIQUE ints. Each int refers to an
            index of a LineString.

    Returns:
        list: A compressed list of tuples. So, it combines adjacent vertices
            into a longer one.
    """
    reduced_idx = list()

    sorted_vertex = sorted(individual_vertex)
    len_vertex = len(sorted_vertex)
    if len_vertex > 0:
        # initialize to unrealistic numbers
        start = -1
        end = -1
        size_of_range = 0

        for index, item in enumerate(sorted_vertex):
            if index == 0:
                start = item
                end = item
            else:
                if item == end + 1:
                    # Extend the current run of consecutive indices.
                    end = item
                    size_of_range += 1
                else:
                    # Close out the previous run, whether it was a single
                    # vertex or a longer range, and start a new one.
                    reduced_idx.append((start, end))
                    size_of_range = 0
                    start = item
                    end = item

            if index == len_vertex - 1:
                # Last item: close out whatever run is open.
                reduced_idx.append((start, end))

    return reduced_idx
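A small worked example with made-up indices: 2, 3, 4 collapse into one range, while the isolated vertices become single-point tuples.

assert compress_vertex_list([7, 0, 3, 2, 4]) == [(0, 0), (2, 4), (7, 7)]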
a98f8b101219215f719b598ed8c47074a42ecb13
704,555
def week_of_year(datetime_col):
    """Returns the ISO week number from a datetime column."""
    # Series.dt.week was deprecated and later removed from pandas;
    # isocalendar().week is the modern equivalent.
    return datetime_col.dt.isocalendar().week
c1bf4e0cd5d4aeddf2cff9a1142fcb45b17d1425
704,557
def normalize(df, df_ref=None):
    """
    Normalize all numerical values in dataframe

    :param df: dataframe
    :param df_ref: reference dataframe
    """
    if df_ref is None:
        df_ref = df

    df_norm = (df - df_ref.mean()) / df_ref.std()
    return df_norm
56c96f43c98593a5cf21425f23cfd92a7f6d6fe3
704,558
def _validate_positive_int(value):
    """Validate value is a natural number."""
    try:
        value = int(value)
    except ValueError as err:
        raise ValueError("Could not convert to int") from err
    if value > 0:
        return value
    else:
        raise ValueError("Only positive values are valid")
ddc2087d69c96fa72594da62192df58555b25029
704,560
def transpose(table):
    """
    Returns a copy of table with rows and columns swapped

    Example:
        1 2          1 3 5
        3 4    =>    2 4 6
        5 6

    Parameter table: the table to transpose
    Precondition: table is a rectangular 2d List of numbers
    """
    result = []  # Result (new table) accumulator
    # Loop over columns
    for col in range(len(table[0])):
        # Add each column as a row to result
        result.append([row[col] for row in table])
    return result
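Checking the implementation above against the docstring's own example:

assert transpose([[1, 2], [3, 4], [5, 6]]) == [[1, 3, 5], [2, 4, 6]]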
fe84714d3e09deb22058fd75ac3333c2206f77c3
704,561
def max_rl(din):
    """
    A MAX function should "go high" only when all of its inputs have arrived.
    Thus, AND gates are used for its implementation.

    Input: a list of 1-bit WireVectors
    Output: a 1-bit WireVector
    """
    if len(din) == 1:
        dout = din[0]
    else:
        dout = din[0] & max_rl(din[1:])
    return dout
b65710967a8a785e1ca0679252ac69c140b4c560
704,562
import requests


def process_request(url, auth):
    """Perform an http request.

    :param url: full url to query
    :type url: ``str``
    :param auth: username, password credentials
    :type auth: ``tuple`` || ``None``
    :returns: ``dict``
    """
    content = requests.get(url, auth=auth)
    if content.status_code >= 300:
        raise SystemExit(content.content)
    return content.json()
051c60e03458e3c38d93dfd65d15f355ec284c12
704,563
import random


def genpass(pwds_amount=1, paswd_length=8):
    """
    Returns a list of 'pwds_amount' random passwords, each having a length
    of 'paswd_length'.
    """
    return [
        ''.join([chr(random.randint(32, 126)) for _ in range(paswd_length)])
        for _ in range(pwds_amount)]
d5d4e38cc334f44e837c72f265a391bf72f5bd5f
704,565
def vect3_scale(v, f):
    """
    Scales a vector by factor f.

    v (3-tuple): 3d vector
    f (float): scale factor
    return (3-tuple): 3d vector
    """
    return (v[0] * f, v[1] * f, v[2] * f)
94902cad0a7743f8e3ed1582bf6402229b8a028d
704,566
import math


def RadialToTortoise(r, M):
    """
    Convert the radial coordinate to the tortoise coordinate.

    r = radial coordinate
    M = ADMMass used to convert coordinate
    return = tortoise coordinate value
    """
    return r + 2. * M * math.log(r / (2. * M) - 1.)
1bbfad661d360c99683b3c8fbe7a9c0cabf19686
704,570
def MediumOverLong(lengths):
    """
    A measure of how needle-like or how plate-like a molecule is.

    0 means perfect needle shape
    1 means perfect plate-like shape

    MediumOverLong = Medium / Longest
    """
    return lengths[1] / lengths[2]
48a053b55b39a50d7b0f618f843d370a55220765
704,575
def rename_category_for_flattening(category, category_parent=""):
    """
    Tidy the name of the passed category by removing extraneous characters
    such as '_' and '-'.

    :param category: string to be renamed (namely, a category of crime)
    :param category_parent: optional string to insert at the beginning of the
        string (in addition to other edits)
    :return: new string name for the category passed
    """
    if category_parent == "":
        return (category.lower().replace(" ", "_").replace("/", "")
                .replace("(", "").replace(")", "").replace(",", "")
                .replace(";", "_").replace("-", ""))
    return category_parent + "_" + (category.lower().replace(" ", "_")
                                    .replace("/", "").replace("(", "")
                                    .replace(")", "").replace(",", "")
                                    .replace(";", "").replace("-", ""))
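Two illustrative calls (the category names are made up); note that the parent, when given, is prepended with an underscore:

assert rename_category_for_flattening("Theft (Petty)") == "theft_petty"
assert rename_category_for_flattening("Theft (Petty)", "crime") == "crime_theft_petty"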
360e87da0a8a778f32c47adc58f33a2b92fea801
704,580
import math


def billing_bucket(t):
    """
    Returns billing bucket for AWS Lambda.

    :param t: An elapsed time in ms.
    :return: Nearest 100ms, rounding up, as int.
    """
    return int(math.ceil(t / 100.0)) * 100
87b9963c1a2ef5ad7ce1b2fac67e563dcd763f73
704,581
def adjust_lr_on_plateau(optimizer):
    """Decrease learning rate by factor 10 if validation loss reaches a
    plateau."""
    for param_group in optimizer.param_groups:
        param_group['lr'] = param_group['lr'] / 10
    return optimizer
615631fd4853e7f0c0eae59a3336eb4c4794d3a3
704,583
import re


def replace_php_define(text, define, value):
    """
    Replaces a named constant (define) in PHP code.

    Args:
        text (str): The PHP code to process.
        define (str): Name of the named constant to modify.
        value (int, str): Value to set the 'define' to.

    Returns:
        The modified PHP code.
    """
    if isinstance(value, str):
        replacement = r"\g<1>'{0}'\g<2>".format(value)
    elif isinstance(value, int):
        replacement = r'\g<1>{0}\g<2>'.format(value)
    else:
        raise RuntimeError('Datatype is not supported.')

    regex = r"^(\s*define\s*\(\s*'{0}'\s*,\s*).*(\s*\)\s*;.*)".format(re.escape(define))
    text, substitutions = re.subn(regex, replacement, text, 1,
                                  re.MULTILINE | re.UNICODE)
    if substitutions == 0:
        raise RuntimeError(
            "Named constant '{0}' is not part of the specified PHP code.".format(define))
    return text
02e3194d6fb83958d525651cdca6e3cec1cf3bb7
704,590
def _only_one_selected(*args):
    """Test if only one item is True."""
    return sum(args) == 1
9966cc7c2cde16c689f29ba2add80b2cddce56e7
704,592
import random
import string


def generate_random_id(start: str = ""):
    """
    Generates a random alphabetic id.
    """
    result = "".join(random.SystemRandom().choices(string.ascii_lowercase, k=16))
    if start:
        result = "-".join([start, result])
    return result
f818ecf7ba4296a3ad010ef20bc5e286036bb56d
704,593
def get_client_names(worksheet) -> list:
    """Get list of client names from Excel worksheet."""
    num_rows = worksheet.max_row
    names = []
    for i in range(2, num_rows + 1):
        cell_obj = worksheet.cell(row=i, column=1)
        if cell_obj.value not in names:
            names.append(cell_obj.value)
    return names
6da6e52ed10e84ae79119c511e063114bb61b334
704,594
def upper(value: str):  # Only one argument.
    """Converts a string into all uppercase"""
    return value.upper()
8ec4c4ed284bc8d823e356db7749a4c98a00b194
704,596
def xor(a, b):
    """XOR two strings of same length"""
    assert len(a) == len(b)
    x = []
    for i in range(len(a)):
        x.append(chr(ord(a[i]) ^ ord(b[i])))
    return ''.join(x)
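Because XOR is its own inverse, applying the same key twice recovers the original string. A small sanity check with made-up values:

assert xor('abc', 'abc') == '\x00\x00\x00'
assert xor(xor('secret', 'keykey'), 'keykey') == 'secret'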
cbe3d32883dc5516821711181c7f5d52194d89de
704,597
def hello_world(text: str) -> str:
    """Print and return input."""
    print(text)
    return text
7bfcb8e9cfccdf5fad8c702f97f6b7c4e56c7682
704,601
import functools


def polygon_wrapper(func):
    """
    Wrapper function to perform the setup and teardown of polygon attributes
    before and after creating the polygon.

    Keyword arguments:
        func (function) -- the function to draw the polygon.
    """
    @functools.wraps(func)
    def draw_polygon(self, *args, **kwargs):
        """
        Setup the Context, draw the polygon with attributes applied, and
        teardown the environment.
        """
        # Save the Context so we can restore it when this is done
        self.context.save()

        # Initialize the polygon's attributes
        self._init_attributes(**kwargs)

        # Call the function
        result = func(self, *args, **kwargs)

        # Fill the polygon, if it's being filled
        if self.fill:
            self.context.fill_preserve()

        # Set the outline fill_color and outline the polygon
        self.calling_surface._set_color(self.line_color)
        self.context.stroke()

        # Restore the Context now that the polygon is drawn
        self.context.restore()

        return result

    return draw_polygon
76056e41c36a2c15dcb8a2e05cc4ec4c1beb68dc
704,605
def compose_base_find_query(user_id: str, administrator: bool, groups: list):
    """
    Compose a query for filtering reference search results based on user read
    rights.

    :param user_id: the id of the user requesting the search
    :param administrator: the administrator flag of the user requesting the
        search
    :param groups: the group id membership of the user requesting the search
    :return: a valid MongoDB query
    """
    if administrator:
        return dict()

    is_user_member = {
        "users.id": user_id
    }

    is_group_member = {
        "groups.id": {
            "$in": groups
        }
    }

    is_owner = {
        "user.id": user_id
    }

    return {
        "$or": [
            is_group_member,
            is_user_member,
            is_owner
        ]
    }
2f398930603093ddc59e0c6ba4956e7d46a7758d
704,606
def filter_df_on_ncases(df, case_id_glue="case:concept:name", max_no_cases=1000):
    """
    Filter a dataframe keeping only the specified maximum number of cases

    Parameters
    -----------
    df
        Dataframe
    case_id_glue
        Case ID column in the CSV
    max_no_cases
        Maximum number of cases to keep

    Returns
    ------------
    df
        Filtered dataframe
    """
    cases_values_dict = dict(df[case_id_glue].value_counts())
    cases_to_keep = []
    for case in cases_values_dict:
        cases_to_keep.append(case)
    cases_to_keep = cases_to_keep[0:min(len(cases_to_keep), max_no_cases)]
    df = df[df[case_id_glue].isin(cases_to_keep)]
    return df
5f8532ebe465d7b80934b35ef8d3925217f4e355
704,608
def get_first_group(match):
    """
    Retrieves the first group from the match object.
    """
    return match.group(1)
d4103989a7fbd55e40600d391b51dfb93053ed8f
704,612
def _uint_to_le(val, length):
    """Returns a byte array that represents an unsigned integer in
    little-endian format.

    Args:
        val: Unsigned integer to convert.
        length: Number of bytes.

    Returns:
        A byte array of ``length`` bytes that represents ``val`` in
        little-endian format.
    """
    return val.to_bytes(length=length, byteorder='little')
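A worked example: the least significant byte comes first in little-endian order.

assert _uint_to_le(0x12345678, 4) == bytes([0x78, 0x56, 0x34, 0x12])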
54e765e7b3772c6e2e6dc4c7e6de48d034b9d4b5
704,616
def get_type_path(type, type_hierarchy):
    """Gets the type's path in the hierarchy (excluding the root type, like
    owl:Thing).

    The path for each type is computed only once, then cached in
    type_hierarchy, to save computation.
    """
    if 'path' not in type_hierarchy[type]:
        type_path = []
        current_type = type
        while current_type in type_hierarchy:
            type_path.append(current_type)
            current_type = type_hierarchy[current_type]['parent']
        type_hierarchy[type]['path'] = type_path
    return type_hierarchy[type]['path']
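A sketch of the expected call pattern, using a hypothetical hierarchy dict shaped the way the code assumes: each entry carries a 'parent' key, and the root ('owl:Thing' here) has no entry of its own, so it is excluded from paths.

hierarchy = {
    'Dog': {'parent': 'Mammal'},
    'Mammal': {'parent': 'Animal'},
    'Animal': {'parent': 'owl:Thing'},
}
assert get_type_path('Dog', hierarchy) == ['Dog', 'Mammal', 'Animal']
assert hierarchy['Dog']['path'] == ['Dog', 'Mammal', 'Animal']  # cached in place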
29344b63197f4ea6650d059767100401c693990a
704,618
def expand_parameters_from_remanence_array(magnet_parameters, params, prefix):
    """
    Return a new parameters dict with the magnet parameters in the form
    '<prefix>_<magnet>_<segment>', with the values from 'magnet_parameters'
    and other parameters from 'params'.

    The length of the array 'magnet_parameters' must be equal to the sum of
    the number of segments in both cylinders. The first n_II elements refer
    to the inner magnet, and the remaining elements to the outer magnet.
    """
    params_expanded = params.copy()

    n_II = params["n_II"]
    for i in range(0, n_II):
        params_expanded["%s_II_%d" % (prefix, i + 1,)] = magnet_parameters[i]

    n_IV = params["n_IV"]
    for j in range(0, n_IV):
        k = j + n_II  # the first n_II elements refer to magnet II
        params_expanded["%s_IV_%d" % (prefix, j + 1,)] = magnet_parameters[k]

    return params_expanded
e087f5b1e8ea264f074f921a5283d7806178664b
704,620
def tag_group(tag_group, tag):
    """Select a tag group and a tag."""
    payload = {"group": tag_group, "tag": tag}
    return payload
f22ccd817145282729876b0234c8309c24450140
704,622
def read_seq_file(filename):
    """Reads data from a sequence alignment test file.

    Args:
        filename (str): The file containing the sequences and costs.

    Returns:
        str: The first sequence of characters.
        str: The second sequence of characters.
        int: The cost per gap in a sequence.
        int: The cost per mismatch in a sequence.
    """
    with open(filename, 'r') as f:
        next(f)  # Skip first line
        cost_gap, cost_mismatch = next(f).strip().split()
        cost_gap, cost_mismatch = int(cost_gap), int(cost_mismatch)
        seq_x = next(f).strip()
        seq_y = next(f).strip()

    return seq_x, seq_y, cost_gap, cost_mismatch
9160bb0b2643deae669818cea1bc1ebeb51506b8
704,624
def _seasonal_prediction_with_confidence(arima_res, start, end, exog, alpha,
                                         **kwargs):
    """Compute the prediction for a SARIMAX and get a conf interval

    Unfortunately, SARIMAX does not really provide a nice way to get the
    confidence intervals out of the box, so we have to perform the
    ``get_prediction`` code here and unpack the confidence intervals manually.

    Notes
    -----
    For internal use only.
    """
    results = arima_res.get_prediction(
        start=start, end=end, exog=exog, **kwargs)
    f = results.predicted_mean
    conf_int = results.conf_int(alpha=alpha)
    return f, conf_int
9520bf1a60eeb39c25e9a369b0b337905df9afb8
704,626
def top_height(sz):
    """Returns the height of the top part of a size-`sz` AS-Waksman network."""
    return sz // 2
1e4a43a8935cc5c3ccf104e93f87919205baf4a4
704,627
def reverse_complement(dna):
    """
    Reverse-complement a DNA sequence.

    :param dna: string, DNA sequence
    :type dna: str
    :return: reverse-complement of a DNA sequence
    """
    complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
    return ''.join([complement[base] for base in dna[::-1]])
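A quick check: reversing 'ATGC' gives 'CGTA', and complementing each base yields the result below.

assert reverse_complement('ATGC') == 'GCAT'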
efcb38e06fc494adabeb304934ebef9bd932a11f
704,631
import torch


def threshold(tensor, density):
    """
    Computes a magnitude-based threshold for given tensor.

    :param tensor: PyTorch tensor
    :type tensor: `torch.Tensor`
    :param density: Desired ratio of nonzeros to total elements
    :type density: `float`
    :return: Magnitude threshold
    :rtype: `float`
    """
    tf = tensor.abs().view(-1)
    numel = int(density * tf.numel())
    if numel == 0:
        raise RuntimeError('Provided density value causes model to be zero.')
    # tf already holds absolute values, so topk can use it directly.
    topk, _ = torch.topk(tf, numel, sorted=True)
    return topk.data[-1]
d0c5a2726a2df195b0588af8af95dac187f50e1b
704,635
def _succ(p, l):
    """Retrieve the successor of p in list l."""
    pos = l.index(p)
    if pos + 1 >= len(l):
        return l[0]
    else:
        return l[pos + 1]
0eea63bd24da4079b9718af437c6d7e38ef25444
704,638
def reorder_cols_df(df, cols):
    """Reorder the columns of a DataFrame to start with the provided list of
    columns."""
    cols2 = [c for c in cols if c in df.columns.tolist()]
    cols_without = df.columns.tolist()
    for col in cols2:
        cols_without.remove(col)
    return df[cols2 + cols_without]
917b0084ba34f8e1b1fc697c4838ff8404a2fc90
704,641
def add_923_heat_rate(df):
    """
    Small function to calculate the heat rate of records with fuel consumption
    and net generation.

    Parameters
    ----------
    df : dataframe
        Must contain the columns net_generation_mwh and
        fuel_consumed_for_electricity_mmbtu

    Returns
    -------
    dataframe
        Same dataframe with new column of heat_rate_mmbtu_mwh
    """
    # Calculate the heat rate for each prime mover/fuel combination
    df["heat_rate_mmbtu_mwh"] = (
        df["fuel_consumed_for_electricity_mmbtu"] / df["net_generation_mwh"]
    )
    return df
907ac6ba469a65dfe25a84f7498e66b1e0535d19
704,642
from datetime import datetime


def datefix(datestr):
    """
    Transform a string into a python datetime object.

    Handles mm/dd/yy or mm/dd/yyyy, and dashes instead of slashes.
    """
    fix = datestr.replace('-', '/')
    if len(fix) > 4:
        try:
            return datetime.strptime(fix, "%m/%d/%y")
        except ValueError:
            return datetime.strptime(fix, "%m/%d/%Y")
    return datetime.utcnow()
2cb728dfcec24b350d63a79fc3964d3325780b6a
704,644
import re


def cleanHtml(sentence):
    """
    Remove all HTML tags from the sentence.

    :param sentence: {str} sentence
    :return: {str} sentence without HTML tags
    """
    cleanr = re.compile('<.*?>')
    cleantext = re.sub(cleanr, ' ', str(sentence))
    return cleantext
1a3edcd7227468f8f3102525538a728a9bc93fc0
704,645
def fit_index(dataset, list_variables):
    """
    Mapping between index and category, for categorical variables.

    For each (categorical) variable, create 2 dictionaries:
    - index_to_categorical: from the index to the category
    - categorical_to_index: from the category to the index

    Parameters
    ----------
    dataset: pandas.core.frame.DataFrame
        DataFrame with (partly) categorical variables
    list_variables: list(str)
        List of variable names to index

    Returns
    -------
    index: dict
        For each categorical column, we have the 2 mappings: idx2cat & cat2idx
    """
    index = dict()
    for icol in list_variables:
        if icol not in dataset.columns:
            raise RuntimeError(f'{icol} not found in dataframe')
        idx2cat = {ii: jj for ii, jj in enumerate(dataset.loc[:, icol].unique())}
        cat2idx = {jj: ii for ii, jj in idx2cat.items()}
        index[icol] = {
            'index_to_categorical': idx2cat,
            'categorical_to_index': cat2idx
        }
    return index
7b8c73a5d23de2e537c1f28078d2e032095d6b1c
704,648
def convert_bin_to_text(bin_str: str) -> str:
    """Convert a string of binary to text.

    Parameters:
    -----------
    bin_str: string:
        A string of binary, terminating with 00000000.

    Returns:
    --------
    text: string:
        A plaintext representation of the binary string.
    """
    # get number of characters, less one for the terminating 00000000 byte
    num_chars = int(len(bin_str) / 8) - 1
    text = ""
    for i in range(num_chars):
        ascii_val = int(bin_str[i * 8:(i + 1) * 8], 2)
        text += chr(ascii_val)
    return text
8890ff192ae4b6e01401dd7f018bf8906c3c37ce
704,650
def scale_on_x_list(x_list, scaler):
    """Scale list of ndarray."""
    return [scaler.transform(e) for e in x_list]
2fbe36cb23e99ca6eaf277fb5509e2e997ec4a52
704,651
import hashlib


def md5(ori_str):
    """
    MD5 hash.

    :param ori_str: original string
    :return: hex digest of the hashed string
    """
    md5_obj = hashlib.md5()
    md5_obj.update(ori_str.encode("utf8"))
    return md5_obj.hexdigest()
75efc3226c2f0355ce4b988acd6dcd1a95ea8294
704,652
import getpass


def getuser() -> str:
    """
    Get the username of the current user.

    Will leverage the ``getpass`` package.

    Returns:
        str: The username of the current user
    """
    return getpass.getuser()
3f6053e9aba37f7eafcd7735d7509af290fd3940
704,653
def cal_pipe_equivalent_length(tot_bui_height_m, panel_prop, total_area_module):
    """
    To calculate the equivalent length of pipings in buildings

    :param tot_bui_height_m: total height of buildings
    :type tot_bui_height_m: float
    :param panel_prop: properties of the solar panels
    :type panel_prop: dict
    :param total_area_module: total installed module area
    :type total_area_module: float
    :return: equivalent lengths of pipings in buildings
    :rtype: dict
    """
    # local variables
    lv = panel_prop['module_length_m']  # module length
    total_area_aperture = total_area_module * panel_prop['aperture_area_ratio']
    # this is an estimation
    number_modules = round(total_area_module / panel_prop['module_area_m2'])

    # main calculation
    # pipe length within the collectors
    l_ext_mperm2 = (2 * lv * number_modules / total_area_aperture)
    # pipe length from building substation to roof top collectors
    l_int_mperm2 = 2 * tot_bui_height_m / total_area_aperture
    Leq_mperm2 = l_int_mperm2 + l_ext_mperm2  # in m/m2 aperture

    pipe_equivalent_lengths = {'Leq_mperm2': Leq_mperm2,
                               'l_ext_mperm2': l_ext_mperm2,
                               'l_int_mperm2': l_int_mperm2}

    return pipe_equivalent_lengths
60c95cc1c5a38876095a77f4e68ab3b0df6280a3
704,654
def format_parameters(section):
    """Format the "Parameters" section."""
    def format_item(item):
        item = map(lambda x: x.strip(), item)
        return ' - **{0}**: *{1}*\n {2}'.format(*item)

    return '**Parameters**\n\n{0}'.format('\n\n'.join(
        map(format_item, section)))
8f1393b843b6ea46d69d5644f932f7f0e62160ab
704,655
def normalize_trinucleotide(trinucleotide):
    """Return the normalized representation of the input trinucleotide
    sequence.

    Notes
    -----
    Each trinucleotide sequence has two possible representations (the
    sequence and its reverse complement). For example, 5'-ACG-3' and
    5'-CGT-3' are two representations of the same trinucleotide sequence.
    To prevent ambiguity, the representation where the central nucleotide
    of the trinucleotide context is a C or a T is chosen.
    """
    # Consistency checks
    assert len(trinucleotide) == 3
    for letter in trinucleotide:
        assert letter in ['A', 'C', 'G', 'T']

    complement_map = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}

    reverse_complement = ""
    for letter in trinucleotide[::-1]:
        reverse_complement += complement_map[letter]

    # Consistency checks
    assert len(reverse_complement) == 3
    for letter in reverse_complement:
        assert letter in ['A', 'C', 'G', 'T']

    # Choose the seq where the middle nucleotide is a 'C' or a 'T'
    if trinucleotide[1] in ['C', 'T']:
        return trinucleotide
    elif reverse_complement[1] in ['C', 'T']:
        return reverse_complement
    else:
        raise Exception("Unexpected error.")
fe04ba6fad28285eac9becbbd6e5324ec7734850
704,660
import math


def format_float(number, decimal_places):
    """
    Accurately round a floating-point number to the specified decimal places
    (useful for formatting results).
    """
    divisor = math.pow(10, decimal_places)
    value = number * divisor + .5
    value = str(int(value) / divisor)
    frac = value.split('.')[1]
    trail_len = decimal_places - len(frac)
    return value + ''.join(['0'] * trail_len)
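Two worked examples showing the zero-padding of the fractional part:

assert format_float(1.5, 3) == "1.500"
assert format_float(2.0, 2) == "2.00"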
e7aaa92025284489075ce053319c27310bb96a00
704,662
def decode_transaction_filter(metadata_bytes):
    """Decodes transaction filter from metadata bytes.

    Args:
        metadata_bytes (str): Encoded list of transaction filters

    Returns:
        decoded transaction_filter list
    """
    transaction_filter = []
    if not metadata_bytes:
        return None
    for i in metadata_bytes:
        transaction_filter.append(int(i))
    return transaction_filter
c76638f6592fb098e2878471746152aa9df9a694
704,663
def merge_schema(original: dict, other: dict) -> dict:
    """Merge two schema dictionaries into a single dict.

    Args:
        original (dict): Source schema dictionary
        other (dict): Schema dictionary to append to the source

    Returns:
        dict: Dictionary value of new merged schema
    """
    source = original.copy()
    for key, value in other.items():
        if key not in source:
            source[key] = value
        else:
            if isinstance(value, list):
                source[key].extend(value)
            elif isinstance(value, dict):
                source[key] = merge_schema(source[key], value)
            else:
                source[key] = value
    return source
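An illustrative merge with made-up schemas. Note that `original.copy()` is a shallow copy, so list values reached through it (like 'fields' below) are extended in place on the source dict as well:

base = {'fields': ['id'], 'meta': {'version': 1}}
extra = {'fields': ['name'], 'meta': {'owner': 'etl'}, 'strict': True}
assert merge_schema(base, extra) == {
    'fields': ['id', 'name'],
    'meta': {'version': 1, 'owner': 'etl'},
    'strict': True,
}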
6425b64e6ab166ac14afc2e47392745903b8fd12
704,667
import hashlib


def hash160(s: bytes) -> bytes:
    """
    sha256 followed by ripemd160.

    :param s: data
    :return: hashed data
    """
    return hashlib.new('ripemd160', hashlib.sha256(s).digest()).digest()
7b18fcdf51db707a17d5408c7b364818a6c5ee0c
704,668
import re


def valid_email(email):
    """Check for a valid email address.

    Args:
        email (str): Email.

    Returns:
        bool: Return True if in valid email format and False if not.
    """
    return bool(re.match(
        r"^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*$",
        email))
01c343008229fb2fdf2af3a9a74f3059930696eb
704,669
def PeerDownHasBgpNotification(reason):
    """Determine whether or not a BMP Peer Down message has a BGP notification.

    Args:
        reason: the Peer Down reason code (from the draft)

    Returns:
        True if there will be a BGP Notification, False if not
    """
    return reason == 1 or reason == 3
8ee214798f6766916e8784dd907eeb45ff6620db
704,670
def set_purpose(slack_client, channel, purpose):
    """
    Set the purpose of a given channel.
    """
    response = slack_client.api_call("channels.setPurpose",
                                     purpose=purpose, channel=channel)
    return response
786a495b55300b955e2f7ec525117be75b251a07
704,671
def get_validate_result_form(tel_num, validate_code):
    """
    Assemble the form for get_validate_result.

    :param tel_num: Tel number
    :param validate_code: Validate code from the captcha image
    :return: Params in a dict
    """
    post_data_dict = dict()
    post_data_dict['source'] = 'wsyyt'
    post_data_dict['telno'] = tel_num
    post_data_dict['validcode'] = validate_code
    return post_data_dict
6340c97522a097c0cf96170e08466fb795e16dc3
704,674
def metric_max_over_ground_truths(metric_fn, predictions, ground_truths):
    """Take the average best score against all ground truth answers.

    This is a bit different than SQuAD in that there are multiple answers
    **and** predictions that we average over. For some situations (e.g.,
    *top k* beams or multiple human references) we might want to calculate
    the average performance. In most cases, however, predictions will be a
    list of length 1.

    Args:
        metric_fn: Callable on (prediction, ground_truth).
        predictions: List of whitespace separated prediction tokens.
        ground_truths: List of whitespace separated answer tokens.

    Returns:
        max_score: Max output of metric_fn.
    """
    all_metrics = []
    for prediction in predictions:
        scores_for_ground_truths = []
        for ground_truth in ground_truths:
            score = metric_fn(prediction, ground_truth)
            scores_for_ground_truths.append(score)
        all_metrics.append(max(scores_for_ground_truths))
    return sum(all_metrics) / len(all_metrics)
7c78fc1cca29bc9784a4e4687d794c1f2b6872c9
704,678
def not_contains(a, b):
    """Evaluates that a does not contain b."""
    return b not in a
a0dc087049c8e93c1acdf0e59e3530a6ff8b54e5
704,684
import csv


def import_town(data_file):
    """
    Reads town raster data from a CSV file.

    Parameters
    ----------
    data_file : str
        Name of CSV raster data file to use for the town.

    Returns
    -------
    town : list
        List (cols) of lists (rows) representing raster data of the town.
    """
    # Read in town data and format it as a list of lists
    with open(data_file, newline="") as f:
        reader = csv.reader(f, quoting=csv.QUOTE_NONNUMERIC)
        town = []
        for row in reader:
            rowlist = []
            for value in row:
                rowlist.append(value)
            town.append(rowlist)
    return town
b7749dfd4d698fddfe610c6a51c8ccc43c375cc2
704,686