Dataset columns: content (string, 39 – 14.9k chars) · sha1 (string, 40 chars) · id (int64, 0 – 710k)
import re

def check_exact_match(line, expected_line):
    """
    Uses regular expressions to find an exact (not partial) match for 'expected_line' in 'line',
    i.e. in the example below it matches 'foo' and succeeds:

        line value: '66118.999958 - INFO - [MainThread] - ly_test_tools.o3de.asset_processor - foo'
        expected_line: 'foo'

    :param line: The log line string to search,
        i.e. '9189.9998188 - INFO - [MainThread] - example.tests.test_system_example - Log Monitoring test 1'
    :param expected_line: The exact string to match when searching the line param, i.e. 'Log Monitoring test 1'
    :return: An exact match for the string if one is found, None otherwise.
    """
    # Look for either start of line or whitespace, then the expected_line, then either end of the line or whitespace.
    # This way we don't partial match inside of a string. So for example, 'foo' matches 'foo bar' but not 'foobar'.
    regex_pattern = re.compile("(^|\\s){}($|\\s)".format(re.escape(expected_line)), re.UNICODE)
    if regex_pattern.search(line) is not None:
        return expected_line
    return None
d01eaa13c40d66999e870d3b287ac869f64ae314
4,663
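A quick sanity check for check_exact_match above (a usage sketch; the log line is taken from the docstring):

log_line = '66118.999958 - INFO - [MainThread] - ly_test_tools.o3de.asset_processor - foo'
assert check_exact_match(log_line, 'foo') == 'foo'     # whole-word match succeeds
assert check_exact_match('foobar baz', 'foo') is None  # no partial match inside 'foobar'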
def rounding_filters(filters, w_multiplier):
    """Calculate and round number of filters based on width multiplier."""
    if not w_multiplier:
        return filters
    divisor = 8
    filters *= w_multiplier
    new_filters = max(divisor, int(filters + divisor / 2) // divisor * divisor)
    if new_filters < 0.9 * filters:  # prevent rounding by more than 10%
        new_filters += divisor
    return int(new_filters)
eb2938732792564fd324602fd74be41e6f88b265
4,664
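A few worked values for rounding_filters above (illustrative inputs; it rounds the scaled filter count to the nearest multiple of 8 and bumps it up if rounding lost more than 10%):

assert rounding_filters(32, 1.0) == 32  # already a multiple of 8
assert rounding_filters(32, 1.2) == 40  # 38.4 rounds to 40
assert rounding_filters(10, 1.1) == 16  # 11 would round down to 8 (>10% drop), so add the divisor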
from typing import List

def get_gate_names_2qubit() -> List[str]:
    """Return the list of valid gate names of 2-qubit gates."""
    names = []
    names.append("cx")
    names.append("cz")
    names.append("swap")
    names.append("zx90")
    names.append("zz90")
    return names
d3d7f20263805a186d9142ec087039eb53076346
4,665
def compute_log_zT_var(log_rho_var, log_seebeck_sqr_var, log_kappa_var):
    """Compute the variance of the logarithmic thermoelectric figure of merit zT."""
    return log_rho_var + log_seebeck_sqr_var + log_kappa_var
3528181796aeafb3df5eac09b06852afe028cb13
4,666
import colorsys
import hashlib

def uniqueColor(string):
    """
    Returns a color from the string.
    Same strings will return same colors, different strings will return different colors ('randomly' different)

    Internal: string =md5(x)=> hex =x/maxhex=> float [0-1] =hsv_to_rgb(x,1,1)=> rgb =rgb_to_int=> int

    :param string: input string
    :return: int color
    """
    # Hash the string to a float in [0, 1), use it as the hue, and pack the RGB channels into one int.
    hue = int(hashlib.md5(string.encode('utf-8')).hexdigest(), 16) / 2 ** 128
    return sum(round(c * 255) << d for c, d in zip(colorsys.hsv_to_rgb(hue, 1, 1), [16, 8, 0]))
0c895612c3bf2dd5f594a15daf6f2aa5d778eeb0
4,668
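To interpret the packed integer that uniqueColor returns, the 8-bit channels can be unpacked by shifting (a usage sketch assuming the function above is in scope):

color = uniqueColor("example")
r, g, b = (color >> 16) & 0xFF, (color >> 8) & 0xFF, color & 0xFF
assert uniqueColor("example") == color  # deterministic: same string, same color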
def kappa(a, b, c, d):
    """
    Cohen's kappa between two GO terms, from the 2x2 contingency table:

                            GO term 2
                         |  yes  |  no  |
        ---------------------------------
        GO       |  yes  |   a   |  b   |
        term 1   |  no   |   c   |  d   |

    kappa(GO_1, GO_2) = 1 - (1 - po) / (1 - pe)
    po = (a + d) / (a + b + c + d)
    marginal_a = ((a + b) * (a + c)) / (a + b + c + d)
    marginal_b = ((c + d) * (b + d)) / (a + b + c + d)
    pe = (marginal_a + marginal_b) / (a + b + c + d)
    """
    a = float(len(a))
    b = float(len(b))
    c = float(len(c))
    d = float(len(d))
    po = (a + d) / (a + b + c + d)
    marginal_a = ((a + b) * (a + c)) / (a + b + c + d)
    marginal_b = ((c + d) * (b + d)) / (a + b + c + d)
    pe = (marginal_a + marginal_b) / (a + b + c + d)
    # print(f" {a} | {b}\n {c} | {d}")
    return 1 - (1 - po) / (1 - pe)
5884a6745f6a93b044eabb1bfe38834cb59366d4
4,671
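Note the four arguments are collections whose sizes fill the contingency table (len() is taken inside kappa). A perfect-agreement check with hypothetical cell contents:

cell_a, cell_b, cell_c, cell_d = [1] * 40, [], [], [1] * 60
assert kappa(cell_a, cell_b, cell_c, cell_d) == 1.0  # b = c = 0 means full agreement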
def zfsr32(val, n):
    """zero fill shift right for 32 bit integers"""
    return (val >> n) if val >= 0 else ((val + 4294967296) >> n)
4b890caa0b7b086e923e7b229e5551fd66d24016
4,672
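zfsr32 mirrors the unsigned right shift (>>> in Java/JavaScript); a quick check against reinterpreting the value as unsigned:

assert zfsr32(-1, 28) == 15  # 0xFFFFFFFF shifted right by 28
assert zfsr32(-1, 28) == ((-1 & 0xFFFFFFFF) >> 28)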
from typing import Union
from pathlib import Path

def is_dir(path: Union[str, Path]) -> bool:
    """Check if the given path is a directory

    :param path: path to be checked
    """
    if isinstance(path, str):
        path = Path(path)
    if path.exists():
        return path.is_dir()
    else:
        return str(path).endswith("/")
540cce7f5c6a25186427ba71b94aa090c2ab90a7
4,673
import re

def document_to_vector(lemmatized_document, uniques):
    """
    Converts a lemmatized document to a bag-of-words (bow) vector representation.
    1/0 for word exists/doesn't exist
    """
    # tokenize
    words = re.findall(r'\w+', lemmatized_document.lower())
    vector = [0] * len(uniques)
    for i in range(len(uniques)):
        for j in range(len(words)):
            if uniques[i] == words[j]:
                vector[i] = 1
                break  # word found; no need to scan the rest of the document
    return vector
e4b108b8e99a827788d7eff5d4eabf71021d6e21
4,674
def cuda_reshape(a, shape):
    """Reshape a GPUArray.

    Parameters:
        a (gpu): GPUArray.
        shape (tuple): Dimension of new reshaped GPUArray.

    Returns:
        gpu: Reshaped GPUArray.

    Examples:
        >>> a = cuda_reshape(cuda_give([[1, 2], [3, 4]]), (4, 1))
        array([[ 1.],
               [ 2.],
               [ 3.],
               [ 4.]])

        >>> type(a)
        <class 'pycuda.gpuarray.GPUArray'>
    """
    return a.reshape(shape)
966cae8aeb88aeaeada28a11c284920746771f00
4,677
def update_cv_validation_info(test_validation_info, iteration_validation_info):
    """Updates a dictionary with given values"""
    test_validation_info = test_validation_info or {}
    for metric in iteration_validation_info:
        test_validation_info.setdefault(metric, []).append(iteration_validation_info[metric])
    return test_validation_info
b2509026e968b1c428836c5313e9c5e824663d4f
4,680
def sent2labels(sent):
    """
    Extracts gold labels for each sentence.
    Input: sentence list
    Output: list with labels list for each token in the sentence
    """
    # gold labels at index 18
    return [word[18] for word in sent]
11b4dc93c465d154e8bf8688a5b5c592b94e7265
4,682
def get_course_id_from_capa_module(capa_module):
    """
    Extract a stringified course run key from a CAPA module (aka ProblemBlock).

    This is a bit of a hack. Its intended use is to allow us to pass the course id
    (if available) to `safe_exec`, enabling course-run-specific resource limits in the
    safe execution environment (codejail).

    Arguments:
        capa_module (ProblemBlock|None)

    Returns:
        str|None
        The stringified course run key of the module.
        If not available, fall back to None.
    """
    if not capa_module:
        return None
    try:
        return str(capa_module.scope_ids.usage_id.course_key)
    except (AttributeError, TypeError):
        # AttributeError:
        #     If the capa module lacks scope ids or has unexpected scope ids, we
        #     would rather fall back to `None` than let an AttributeError be raised
        #     here.
        # TypeError:
        #     Old Mongo usage keys lack a 'run' specifier, and may
        #     raise a type error when we try to serialize them into a course
        #     run key. This is tolerable because such course runs are deprecated.
        return None
dd76b1d6df12f6c7db0d095bb9a48940a850e5c7
4,683
def stream_name_mapping(stream, exclude_params=['name'], reverse=False):
    """
    Return a complete dictionary mapping between stream parameter names
    to their applicable renames, excluding parameters listed in
    exclude_params.

    If reverse is True, the mapping is from the renamed strings
    to the original stream parameter names.
    """
    filtered = [k for k in stream.param if k not in exclude_params]
    mapping = {k: stream._rename.get(k, k) for k in filtered}
    if reverse:
        return {v: k for k, v in mapping.items()}
    else:
        return mapping
5a9c9ab80ad470c45d22f2e360cc53c979300825
4,690
def helping_func(self, driver, value):
    """Helper function for testing method composition."""
    return value + 1
2a204814213707a255b0b0e57e4d5ca23389045d
4,700
def get_with_label(label, tree):
    """Get a tree's node given its label"""
    return [n for n in tree.children if n.label == label][0]
fc976bcbbf8f5a03b2a17dd7b5c0061a22bedf60
4,702
import re

def is_hex(hex_str):
    """Helper function to verify a string is a (lowercase) hex value."""
    return re.fullmatch('[0-9a-f]+', hex_str)
c5a53ccbcec36d77bee88d9c81aea46d2a0eec2d
4,710
import tarfile
import io

def _unpack(stream: bytes, path: str) -> str:
    """Unpack archive in bytes string into directory in ``path``."""
    with tarfile.open(fileobj=io.BytesIO(stream)) as tar:
        tar.extractall(path)
    return path
81a05c0a60fb06d43592a0a4f4d30cf62d406e01
4,717
import json

def check_geometry_size(footprint):
    """
    Excessively large geometries are problematic for AWS SQS (max size 256kb) and cause
    performance issues because they are stored in plain text in the JSON blob.

    This function reads the geojson and applies a simple heuristic to reduce the footprint
    size through simplification. With each iteration, the geometry is simplified by 0.01 degrees.

    Parameters
    ----------
    footprint : obj
        A shapely Polygon or MultiPolygon

    Returns
    -------
    geojson : dict
        A geojson representation of the geometry
    """
    geojson = footprint.__geo_interface__
    as_str = json.dumps(geojson)
    geomsize = len(as_str.encode('utf-8'))
    n_iterations = 0
    while geomsize > 125000:
        footprint = footprint.simplify(0.01)
        geojson = footprint.__geo_interface__
        as_str = json.dumps(geojson)
        geomsize = len(as_str.encode('utf-8'))
        n_iterations += 1
    return geojson
b2525958a1440fc1ce0d2560150b7fe28b3ec450
4,722
def cov(sources):
    """
    Given the array of sources for all image patches, calculate the
    covariance array between all modes.

    Parameters
    ----------
    sources : numpy array (floats)
        The {NUM_MODES x NUM_PATCHES} array of sources.

    Returns
    -------
    numpy array (floats)
        The {NUM_MODES x NUM_MODES} covariance array between all modes.
    """
    return (sources @ sources.T) / sources.shape[1]
268dfbc98a5b443e92aadd27ba577f7911ca398f
4,724
def sub(x, y):
    """Subtract x from y (returns y - x)."""
    return y - x
345279da515a877c1f08a8b54ff8f2e7d6a95fec
4,725
def create_error_payload(exception, message, endpoint_id):
    """Creates an error payload to be sent as a response in case of failure"""
    print(f'{exception}: {message}')
    error_payload = {
        'status': 'MESSAGE_NOT_SENT',
        'endpointId': endpoint_id if endpoint_id else 'NO_ENDPOINT_ID',
        'message': f'{exception}: {message}'
    }
    return error_payload
90f266d22429d385e828dcdd92fca3d7b2e6df48
4,728
def rate(epoch, rate_init, epochs_per_order):
    """
    Computes learning rate as a function of epoch index.

    Inputs:
        epoch - Index of current epoch.
        rate_init - Initial rate.
        epochs_per_order - Number of epochs to drop an order of magnitude.
    """
    return rate_init * 10.0 ** (-epoch / epochs_per_order)
cc1c7850d4bd98d30b97c7915ceb96eaeadef327
4,734
def match_countries(df_to_match, olympics):
    """Changes the names of the countries in the df_to_match df so that they match
    the names of the countries in the olympics df.

    Parameters
    -----------
    df_to_match : either of the two dataframes:
        - gdp
        - pop
    olympics : the olympics dataframe

    Returns
    -----------
    df_to_match : the dataframe given as first parameter whose countries now match
        the countries in the olympics df
    common_countries : a list with the common countries in the two dataframes
    """
    # countries in the to_match df
    df_countries = set(df_to_match.columns.tolist())
    # countries in the olympics df
    ol_regions = set(sorted(olympics.region.unique().tolist()))
    # countries in the to_match df that are not in the olympics df
    not_in_ol = df_countries.difference(ol_regions)
    # countries in the olympics df that are not in the to_match df
    not_in_df = ol_regions.difference(df_countries)

    # After printing not_in_ol and not_in_df, we see that some countries are simply named
    # differently. Therefore, I rename these countries in the to_match df so that they
    # match the countries from the olympics df ("Boliva" reproduces the olympics spelling).
    df_to_match.rename(columns={"United States": "USA",
                                "United Kingdom": "UK",
                                "Antigua and Barbuda": "Antigua",
                                "Congo, Dem. Rep.": "Democratic Republic of the Congo",
                                "Lao": "Laos",
                                "North Macedonia": "Macedonia",
                                "Cote d'Ivoire": "Ivory Coast",
                                "Trinidad and Tobago": "Trinidad",
                                "Micronesia, Fed. Sts.": "Micronesia",
                                "St. Vincent and the Grenadines": "Saint Vincent",
                                "St. Lucia": "Saint Lucia",
                                "St. Kitts and Nevis": "Saint Kitts",
                                "Slovak Republic": "Slovakia",
                                "Kyrgyz Republic": "Kyrgyzstan",
                                "Bolivia": "Boliva",
                                "Congo, Rep.": "Republic of Congo"},
                       inplace=True)

    # Check which countries still remain unmatched
    df_countries = set(df_to_match.columns.tolist())
    ol_regions = set(sorted(olympics.region.unique().tolist()))
    # Countries in the to_match df that are still not in the olympics df
    not_in_ol = df_countries.difference(ol_regions)
    # Countries in the olympics df that are still not in the to_match df
    not_in_df = ol_regions.difference(df_countries)
    # Printing not_in_ol and not_in_df shows which countries are still not matched. Used as a check.

    # save the resulting common countries
    common_countries = ol_regions.intersection(df_countries)
    return df_to_match, common_countries
256eaac81daee5c621e7dac4c8c27d0b96868418
4,737
import requests
import logging

def get_reply(session, url, post=False, data=None, headers=None, quiet=False):
    """
    Download an HTML page using the requests session. Low-level function
    that allows for flexible request configuration.

    @param session: Requests session.
    @type session: requests.Session

    @param url: URL pattern with optional keywords to format.
    @type url: str

    @param post: Flag that indicates whether POST request should be sent.
    @type post: bool

    @param data: Payload data that is sent with request (in request body).
    @type data: object

    @param headers: Additional headers to send with request.
    @type headers: dict

    @param quiet: Flag that tells whether to print error message when
        status code != 200.
    @type quiet: bool

    @return: Requests response.
    @rtype: requests.Response
    """
    request_headers = {} if headers is None else headers
    request = requests.Request('POST' if post else 'GET', url, data=data, headers=request_headers)
    prepared_request = session.prepare_request(request)
    reply = session.send(prepared_request)
    try:
        reply.raise_for_status()
    except requests.exceptions.HTTPError as e:
        if not quiet:
            logging.error("Error %s getting page %s", e, url)
            logging.error("The server replied: %s", reply.text)
        raise
    return reply
4baa985db090d0f88762c8f6cfadff084f2b88ad
4,738
def mermin_klyshko_quantum_bound(n):
    """The quantum bound for the Mermin-Klyshko inequality is :math:`2^{3(n-1)/2}`.

    :param n: The number of measurement nodes.
    :type n: Int

    :returns: The quantum bound.
    :rtype: Float
    """
    return 2 ** (3 * (n - 1) / 2)
721ca41b19ef72cae77baf1ad6dea5377b6eb67d
4,740
def parse_q(s):
    """Parse the value of query string q (?q=) into a search sub-term."""
    if '=' not in s:
        names = s.split()
        term = '/'.join(map(lambda x: 'n.name=' + x, names))
        return term
    else:
        subterms = s.split()
        res = []
        for subterm in subterms:
            if '=' not in subterm:
                res.append('n.name=' + subterm)
            else:
                res.append(subterm)
        term = '&'.join(res)
        return term
eae907fcb42be4a2c4be26316721ea63aa0284d6
4,742
def getattr_by_path(obj, attr, *default):
    """Like getattr(), but can go down a hierarchy like 'attr.subattr'"""
    value = obj
    for part in attr.split('.'):
        if not hasattr(value, part) and len(default):
            return default[0]
        value = getattr(value, part)
        if callable(value):
            value = value()
    return value
3eccbb39e1781a75a6f0061c1c226cefdcfb17c8
4,744
def get_stack_value(stack, key):
    """Get metadata value from a cloudformation stack."""
    for output in stack.outputs:
        if output['OutputKey'] == key:
            return output['OutputValue']
a6b193c7d884bac78668dfd85bc2a5cbbb6b3f3b
4,745
from typing import MutableMapping

def flatten(d, separator='_', parent_key=None):
    """
    Converts a nested hierarchy of key/value object (e.g. a dict of dicts) into a flat (i.e. non-nested) dict.

    :param d: the dict (or any other instance of collections.MutableMapping) to be flattened.
    :param separator: the separator to use when concatenating nested key names into flattened key names.
    :param parent_key: used internally for recursion.
    :return: a flattened dict (i.e. containing no nested dicts as values).
    """
    if separator is None:
        separator = '_'
    if parent_key is None:
        parent_key = ''
    dict_type = dict if d is None else type(d)
    items = []
    for k, v in d.items():
        new_key = parent_key + separator + k if parent_key else k
        if isinstance(v, MutableMapping):
            items.extend(flatten(v, separator=separator, parent_key=new_key).items())
        else:
            items.append((new_key, v))
    return dict_type(items)
d07daba5007c4c4efee1ccb2033a42e9a52a7efb
4,751
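A small check of the flattener above (assuming flatten is in scope):

nested = {"a": 1, "b": {"c": 2, "d": {"e": 3}}}
assert flatten(nested) == {"a": 1, "b_c": 2, "b_d_e": 3}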
def divide_list(array, number):
    """Split the list into consecutive sub-lists of length `number`;
    raises if the length is not evenly divisible."""
    if len(array) % number != 0:
        raise Exception("len(alist) % number != 0")
    else:
        return [array[x:x + number] for x in range(0, len(array), number)]
09882945b971ce13f7983c33562df0dfde77165c
4,752
def get_exception_message(exception: Exception) -> str:
    """Returns the message part of an exception as string"""
    return str(exception).strip()
6e002329425f716115a5fddb32cbf36cf568ee81
4,753
def _get_frame_time(time_steps):
    """
    Compute average frame time.

    :param time_steps: 1D array with cumulative frame times.
    :type time_steps: numpy.ndarray
    :return: The average length of each frame in seconds.
    :rtype: float
    """
    if len(time_steps.shape) != 1:
        raise ValueError("ERROR: Time series must be a 1D array.")
    # Need to ignore the first frame (0).
    frame_time = time_steps[-1] / (len(time_steps) - 1)
    return frame_time
e849e5d6bcbc14af357365b3e7f98f1c50d93ee4
4,754
def query_table3(song):
    """
    This function returns the SQL necessary to get all users who listened
    to the song name passed as an argument to this function.
    """
    return "select user_name from WHERE_SONG where song_name = '{}';".format(song)
ed9a3fb7eb369c17027871e28b02600b78d483a9
4,755
def extract_sha256_hash(hash):
    """Extract SHA256 hash or return None"""
    prefix = 'sha256:'
    if hash and hash.startswith(prefix):
        return hash.replace(prefix, '')
    return None
11e9f352f3783657d52772c4b69387151d13f3d2
4,761
def get_block(blockidx, blocksz, obj):
    """
    Given obj, a list, return the slice obj[blockidx*blocksz:(blockidx+1)*blocksz],
    clipped to the bounds of obj.

    Ex: get_block(2, 100, range(250)) returns [200, 201, ..., 249]
    """
    if blockidx * blocksz > len(obj):
        return []
    elif (blockidx + 1) * blocksz > len(obj):
        return obj[blockidx * blocksz:]
    else:
        return obj[blockidx * blocksz:(blockidx + 1) * blocksz]
8666cc30be23619a49f899beec17d3ba1f0fb357
4,768
import platform

def get_dataset_mrnet_args(parser, args=[]):
    """
    Get all relevant parameters to handle the dataset
    -> here: MRNET
    """
    # determine path
    if platform.system() == "Linux":
        path = "/home/biomech/Documents/OsteoData/MRNet-v1.0/"
    else:
        path = "C:/Users/Niko/Documents/data/MRNet-v1.0/MRNet-v1.0"
        # path = "C:/Users/ga46yeg/data/MRNet-v1.0"

    # Dataset MRNet:
    # ------------------------------------------------------------------------
    parser.add_argument(
        "--root_dir_mrnet", type=str, default=path, help="Directory of the dataset"
    )
    parser.add_argument(
        "--perspectives",
        type=list,
        default=["axial", "coronal", "sagittal"],
        help="Perspectives of the Mr Scans",
    )
    parser.add_argument(
        "--classes",
        type=list,
        default=["abn", "acl", "men"],
        help="Classify for these classes",
    )
    # ------------------------------------------------------------------------
    return parser
466cb843fca4a09f52a72603dcd2c4379ea1e54d
4,769
def _trim_name(image):
    """Remove the slash at the end of the filename."""
    return image[:-1] if image[-1] == '/' else image
823dd63920673352a18d73f83190853d5a234483
4,773
from typing import List

def _get_public_props(obj) -> List[str]:
    """Return the list of public props from an object."""
    return [prop for prop in dir(obj) if not prop.startswith('_')]
7b3be3e186bc009329ed417c6685fb2503a7c993
4,781
def remap(value, oldMin, oldMax, newMin, newMax):
    """
    Remaps the value to a new min and max value

    Args:
        value: value to remap
        oldMin: old min of range
        oldMax: old max of range
        newMin: new min of range
        newMax: new max of range

    Returns:
        The remapped value in the new range
    """
    return newMin + (((value - oldMin) / (oldMax - oldMin)) * (newMax - newMin))
c0e53ce2b2169b08d271f7077e552762c572cf1f
4,783
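For instance, remap can rescale a 10-bit ADC reading onto a 0–5 V range (values are illustrative):

assert remap(0, 0, 1023, 0.0, 5.0) == 0.0
assert remap(1023, 0, 1023, 0.0, 5.0) == 5.0
assert abs(remap(512, 0, 1023, 0.0, 5.0) - 2.5024) < 1e-3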
def _get_active_tab(visible_tabs, request_path):
    """
    Return the tab that claims the longest matching url_prefix.

    If one tab claims '/a/{domain}/data/'
    and another tab claims '/a/{domain}/data/edit/case_groups/',
    then the second tab wins because it's a longer match.
    """
    matching_tabs = sorted(
        (url_prefix, tab)
        for tab in visible_tabs
        for url_prefix in tab.url_prefixes
        if request_path.startswith(url_prefix)
    )
    if matching_tabs:
        _, tab = matching_tabs[-1]
        return tab
ac9cd34d4b4ee1c1c0356499b389c1f6a7195585
4,785
def _get_pattern_nts(rule):
    """Return a list of NT names present in given rule."""
    nt_names = []
    for bt in rule.ipattern.bits:
        if bt.is_nonterminal():
            nt_name = bt.nonterminal_name()
            nt_names.append(nt_name)
    return nt_names
e690e9187aaff0cf3138444db085e15adfda3847
4,792
def stopping_player(bot, state):
    """A Player that just stands still."""
    return bot.position
72628e39d26760eedc9a0e85a8279ac530ab851d
4,793
def before_after_text(join_set, index, interval_list):
    """Extracts any preceding or following markup to be joined to an interval's text."""
    before_text, after_text = '', ''
    # Checking if we have some preceding or following markup to join with.
    if join_set:
        if index > 0:
            before_text = ''.join(
                character for character in interval_list[index - 1][2] if character in join_set
            )
        if index < len(interval_list) - 1:
            after_text = ''.join(
                character for character in interval_list[index + 1][2] if character in join_set
            )
    return before_text, after_text
b2c63fe1e7ea5bb204e41b27bc79d2c81964369a
4,795
import typing
import requests

def download_file_from_google_drive(
    gdrive_file_id: typing.AnyStr, destination: typing.AnyStr, chunk_size: int = 32768
) -> typing.AnyStr:
    """
    Downloads a file from google drive, bypassing the confirmation prompt.

    Args:
        gdrive_file_id: ID string of the file to download from google drive.
        destination: where to save the file.
        chunk_size: chunk size for gradual downloads.

    Returns:
        The path to the downloaded file.
    """
    # taken from this StackOverflow answer: https://stackoverflow.com/a/39225039
    URL = "https://docs.google.com/uc?export=download"
    session = requests.Session()
    response = session.get(URL, params={'id': gdrive_file_id}, stream=True)
    token = None
    for key, value in response.cookies.items():
        if key.startswith('download_warning'):
            token = value
    if token:
        params = {'id': gdrive_file_id, 'confirm': token}
        response = session.get(URL, params=params, stream=True)
    with open(destination, "wb") as f:
        for chunk in response.iter_content(chunk_size):
            if chunk:  # filter out keep-alive new chunks
                f.write(chunk)
    return destination
29cdcc509aa21a6f2ae14ed18f2c0523bbdbd5a4
4,796
def lines_in_file(filename: str) -> int:
    """
    Count the number of lines in a file

    :param filename: A string containing the relative or absolute path to a file
    :returns: The number of lines in the file
    """
    with open(filename, "r") as f:
        return len(f.readlines())
d71b5c8de1b4eb9a45988e06c17a129f4a19f221
4,799
import re

def md_changes(seq, md_tag):
    """Recreates the reference sequence of a given alignment to the extent that the
    MD tag can represent.

    Note:
        Used in conjunction with `cigar_changes` to recreate the complete reference sequence

    Args:
        seq (str): aligned segment sequence
        md_tag (str): MD tag for associated sequence

    Returns:
        ref_seq (str): a version of the aligned segment's reference sequence given \
            the changes reflected in the MD tag

    Raises:
        ValueError: if MD tag is None

    Example:
        >>> md_changes('CTTATATTGGCCTT', '3C4AT4')
        'CTTCTATTATCCTT'
    """
    if md_tag is None:
        raise ValueError('No MD tag found or given for sequence')
    ref_seq = ''
    last_md_pos = 0
    for mo in re.finditer(r'(?P<matches>\d+)|(?P<del>\^\w+?(?=\d))|(?P<sub>\w)', md_tag):
        mo_group_dict = mo.groupdict()
        if mo_group_dict['matches'] is not None:
            matches = int(mo_group_dict['matches'])
            ref_seq += seq[last_md_pos:last_md_pos + matches]
            last_md_pos += matches
        elif mo_group_dict['del'] is not None:
            deletion = mo_group_dict['del']
            ref_seq += deletion[1:]
        elif mo_group_dict['sub'] is not None:
            substitution = mo_group_dict['sub']
            ref_seq += substitution
            last_md_pos += 1
        else:
            pass
    return ref_seq
f8591d0084f6c10c9bbd1a39b3f9e13cfe952e68
4,801
def get_auto_scaling_group(asg, asg_name: str):
    """Get boto3 Auto Scaling Group by name or raise exception"""
    result = asg.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name])
    groups = result["AutoScalingGroups"]
    if not groups:
        raise Exception("Auto Scaling Group {} not found".format(asg_name))
    return groups[0]
07176e538cdb265ae86b16a5d36bf1b274f45c19
4,802
def join_epiweek(year, week):
    """return an epiweek from the (year, week) pair"""
    return year * 100 + week
fdbc50f8a953ef7307e9558019b3c2b50bc65be4
4,803
def _map_channels_to_measurement_lists(snirf):
    """Returns a map of measurementList index to measurementList group name."""
    prefix = "measurementList"
    data_keys = snirf["nirs"]["data1"].keys()
    mls = [k for k in data_keys if k.startswith(prefix)]

    def _extract_channel_id(ml):
        return int(ml[len(prefix):])

    return {_extract_channel_id(ml): ml for ml in mls}
d6d83c01baec5f345d58fff8a0d0107a40b8db37
4,806
def partition(lst, size):
    """Partition list @lst into @size evenly-sized, interleaved sub-lists
    (elements are taken round-robin, not as contiguous chunks)."""
    return [lst[i::size] for i in range(size)]
af7071a5aac36a51f449f153df145d9218808a4a
4,808
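Note the round-robin striding of partition above; the second argument is the number of sub-lists, not their length:

assert partition([1, 2, 3, 4, 5, 6], 2) == [[1, 3, 5], [2, 4, 6]]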
def get_input_var_value(soup, var_id):
    """Get the value from text input variables.

    Use when you see this HTML format:
        <input id="wired_config_var" ... value="value">

    Args:
        soup (soup): soup pagetext that will be searched.
        var_id (string): The id of a var, used to find its value.

    Returns:
        (string): The value of the variable
    """
    try:
        var_value = soup.find('input', {'id': var_id}).get('value')
        return var_value
    except AttributeError:
        print('\nERROR: <' + var_id + '> not found!\nPagesoup:\n\n', soup)
        raise LookupError
5a9dd65a285c62e0e5e79584858634cb7b0ece75
4,813
def table_from_bool(ind1, ind2):
    """
    Given two boolean arrays, return the 2x2 contingency table

    ind1, ind2 : array-like
        Arrays of the same length
    """
    return [
        sum(ind1 & ind2),
        sum(ind1 & ~ind2),
        sum(~ind1 & ind2),
        sum(~ind1 & ~ind2),
    ]
497ce6ad1810386fedb6ada9ba87f0a5baa6318a
4,818
def sample_duration(sample):
    """
    Returns the duration of the sample (in seconds)

    :param sample:
    :return: number
    """
    return sample.duration
9aaddb69b106ad941e3d1172c8e789b4969da99d
4,824
def zero_check(grid):
    """Take a 2D grid and calculate the number of 0 entries."""
    zeros = 0
    for row in grid:
        for element in row:
            if element == 0:
                zeros += 1
    return zeros
0d69a948eef96937f8a5033256c3c4d9f22ce14d
4,828
def create_gw_response(app, wsgi_env):
    """Create an api gw response from a wsgi app and environ."""
    response = {}
    buf = []
    result = []

    def start_response(status, headers, exc_info=None):
        result[:] = [status, headers]
        return buf.append

    appr = app(wsgi_env, start_response)
    close_func = getattr(appr, 'close', None)
    try:
        buf.extend(list(appr))
    finally:
        close_func and close_func()
    response['body'] = ''.join(buf)
    response['statusCode'] = result[0].split(' ', 1)[0]
    response['headers'] = {}
    for k, v in result[1]:
        response['headers'][k] = v
    if 'Content-Length' not in response['headers']:
        response['headers']['Content-Length'] = str(len(response['body']))
    if 'Content-Type' not in response['headers']:
        response['headers']['Content-Type'] = 'text/plain'
    return response
73dd8459cbf9b79655137536ff42195ba62c1372
4,830
import json

def decode_classnames_json(preds, top=5):
    """
    Returns class code, class name and probability for each class amongst top=5
    for each prediction in preds

    e.g.
    [[('n01871265', 'tusker', 0.69987053), ('n02504458', 'African_elephant', 0.18252705), ...]]
    """
    if len(preds.shape) != 2 or preds.shape[1] != 1000:
        raise ValueError('`decode_classnames_json` expects '
                         'a batch of predictions '
                         '(i.e. a 2D array of shape (samples, 1000)). '
                         'Found array with shape: ' + str(preds.shape))
    with open('imagenet_class_index.json') as data_file:
        data = json.load(data_file)
    results = []
    for pred in preds:
        top_indices = pred.argsort()[-top:][::-1]
        result = [tuple(data[str(i)]) + (pred[i],) for i in top_indices]
        results.append(result)
    return results
807bed051300801a5e6a92bbc96324a66050f6c0
4,831
import math

def closest_power_of_two(n):
    """Returns the closest power of two (linearly) to n.

    See: http://mccormick.cx/news/entries/nearest-power-of-two

    Args:
        n: Value to find the closest power of two of.

    Returns:
        Closest power of two to "n".
    """
    return pow(2, int(math.log(n, 2) + 0.5))
50d78d2a6de4f689ce268a95df97aae72dbd81ac
4,833
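Two worked values for closest_power_of_two above (the rounding happens on log2 of n):

assert closest_power_of_two(6) == 8  # log2(6) ≈ 2.58 rounds to 3
assert closest_power_of_two(5) == 4  # log2(5) ≈ 2.32 rounds to 2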
def divisors(num):
    """
    Takes a number and returns all divisors of the number, ordered least to greatest

    :param num: int
    :return: list (int)
    """
    result = []  # renamed from `list` to avoid shadowing the built-in
    for x in range(1, num + 1):
        if num % x == 0:
            result.append(x)
    return result
848ed77fa92ae1c55d90a5236f0d9db6ae2f377c
4,834
from datetime import datetime

def day_start(src_time):
    """Return the beginning of the day of the specified datetime"""
    return datetime(src_time.year, src_time.month, src_time.day)
2bcc7b136e5cb1e7929e6655daf67b07dbbaa542
4,842
def align_decision_ref(id_human, title):
    """
    In German, decisions are either referred to as 'Beschluss' or
    'Entscheidung'. This function shall align the term used in the title
    with the term used in id_human.
    """
    if 'Beschluss' in title:
        return id_human
    return id_human.replace('Beschluss ', 'Entscheidung ')
ac4f584b8e008576816d9a49dba58bc9c9a6dbc4
4,845
def odd_numbers_list(n):
    """Returns the list of the first n odd numbers"""
    return [2 * k - 1 for k in range(1, n + 1)]
2066cf07e926e41d358be0012a7f2a248c5987a7
4,847
def _is_si_object(storage_instance):
    """
    Helper method for determining if a storage instance is object.

    Args:
        storage_instance:

    Returns:
        (Bool) True if object, False if not.
    """
    si_type = storage_instance.get("service_configuration", None)
    if si_type is None:
        # object not supported on storage instance
        return False
    elif si_type == "object":
        return True
    else:
        return False
3cc2591bb0391e6d9d62197d0bb593f5006215c8
4,856
def set_model_weights(model, weights):
    """Set the given weights to keras model

    Args:
        model : Keras model instance
        weights (dict): Dictionary of weights

    Return:
        Keras model instance with weights set
    """
    for key in weights.keys():
        model.get_layer(key).set_weights(weights[key])
    return model
0adb7294348af379df0d2a7ce2101a6dc3a43be4
4,859
def envfile_to_params(data):
    """
    Converts environment file content into a dictionary with all the parameters.

    If your input looks like:

        # comment
        NUMBER=123
        KEY="value"

    Then the generated dictionary will be the following:

        {
            "NUMBER": "123",
            "KEY": "value"
        }
    """
    params = filter(lambda x: len(x) == 2, map(lambda x: x.strip().split("="), data.splitlines()))
    return {k: v[1:-1] if v.startswith('"') and v.endswith('"') else v for (k, v) in params}
03d3b4eb7ea5552938e6d42dcfd4554a1fe89422
4,862
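A quick check of envfile_to_params above; comment and blank lines drop out because their '='-split length is not 2, and surrounding double quotes are stripped:

content = '# comment\nNUMBER=123\nKEY="value"\n'
assert envfile_to_params(content) == {"NUMBER": "123", "KEY": "value"}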
def check_records(msg: dict) -> int:
    """Returns the number of records sent in the SQS message"""
    records = 0
    if msg is not None:
        records = len(msg[0])
    if records != 1:
        raise ValueError("Expected a single record")
    return records
7036f943b733ca34adaaa5ff917b3eb246075422
4,863
def valid_octet(oct):
    """
    Validates a single IP address octet.

    Args:
        oct (int): The octet to validate

    Returns:
        bool: True if the octet is valid, otherwise false
    """
    return 0 <= oct <= 255
9dd2346bb5df5bc00bb360013abe40b8039bdc45
4,865
def str_to_bool(string):
    """Parses string into boolean"""
    return string.lower() in ("true", "yes")
e7c1645ab3ba59fc4721872df76f406c571cab8f
4,866
import functools
import warnings

def warns(message, category=None):
    """Warning decorator.

    :param message: the warning message
    :param category: the warning category (default: None)
    :return: the decorated function object
    """
    def _(func):
        @functools.wraps(func)
        def warp(*args, **kwargs):
            warnings.warn(message, category, stacklevel=2)
            return func(*args, **kwargs)
        return warp
    return _
4c481dc7eeb42751aef07d87ab9da34b04c573f4
4,873
def build_sub_lattice(lattice, symbol):
    """Generate a sub-lattice of the lattice based on equivalent atomic species.

    Args:
        lattice (ASE crystal class): Input lattice
        symbol (string): Symbol of species identifying sub-lattice

    Returns:
        list of lists:
        sub_lattice: Cartesian coordinates of the sub-lattice of symbol
    """
    sub_lattice = []
    i = 0
    atomic_labels = lattice.get_chemical_symbols()
    positions = lattice.get_scaled_positions()
    for atom in atomic_labels:
        if atom == symbol:
            sub_lattice.append(positions[i])
        i = i + 1
    return sub_lattice
7e7748c31f7f082b2e5ec6f21d0a56f60d5ec06c
4,874
def format_percent(percentage, pos):
    """
    Formats percentages for the 'x' axis of a plot.

    :param percentage: The fraction between 0.0 and 1.0
    :type percentage: float
    :param pos: The position argument
    :type pos: int
    :return: A formatted percentage string
    :rtype: str
    """
    # pylint: disable=unused-argument
    return '{:.0f}%'.format(percentage * 100.)
d8566ce36b21adb351141ac72413b927e0f02c11
4,875
def get_transpose_graph(graph):
    """Get the transpose graph"""
    transpose = {node: set() for node in graph.keys()}
    for node, target_nodes in graph.items():
        for target_node in target_nodes:
            transpose[target_node].add(node)
    return transpose
f7f8e083659e4214d79472961c7240778f37268d
4,878
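In the transpose returned above, every edge u -> v becomes v -> u; for example:

graph = {"a": {"b"}, "b": {"c"}, "c": set()}
assert get_transpose_graph(graph) == {"a": set(), "b": {"a"}, "c": {"b"}}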
def get_all_with_given_response(rdd, response='404'):
    """
    Return a rdd only with those requests
    that received the response code entered.
    Default set to '404'.

    return type: pyspark.rdd.PipelinedRDD
    """
    def status_iterator(ln):
        try:
            status = ln.split(' ')[-2]
            return True if status == response else False
        except:
            # malformed lines yield None, which filter() treats as False
            pass
    return rdd.filter(status_iterator)
8268095938bbc35a6418f557af033a458f041c89
4,881
def s3_put_bucket_website(s3_obj, bucketname, website_config):
    """
    Boto3 client based Put bucket website function

    Args:
        s3_obj (obj): MCG or OBC object
        bucketname (str): Name of the bucket
        website_config (dict): Website configuration info

    Returns:
        dict : PutBucketWebsite response
    """
    return s3_obj.s3_client.put_bucket_website(
        Bucket=bucketname, WebsiteConfiguration=website_config
    )
a60d95ef43e5a3643edeb6dacb2b149fef1892d9
4,883
def pretvori_v_sekunde(niz):
    """
    Convert a string representing a track length in the format hh:mm:ss
    into a number of seconds.
    """
    h, m, s = map(int, niz.split(":"))
    return s + m * 60 + h * 3600
db0cc5872109b15e635b2b1e8731a5343d63f518
4,885
import logging

def _get_profiling_data(filename):
    """Read a given file and parse its content for profiling data."""
    data, timestamps = [], []
    try:
        with open(filename, "r") as f:
            file_data = f.readlines()
    except Exception:
        logging.error("Could not read profiling data.", exc_info=True)
        raise SystemExit(1)
    for line in file_data:
        if line == "\n":
            continue
        line = line.strip()
        line_data = line.split(" ")
        if len(line_data) != 3:
            continue
        _, mem_usage, timestamp = line.split(" ")
        data.append(float(mem_usage))
        timestamps.append(float(timestamp))
    if not data:
        logging.error("No samples to parse in {}.".format(filename))
        raise SystemExit(1)
    return {"data": data, "timestamp": timestamps}
85f434c9aa22d60bae06205162623cde83e5a716
4,886
from typing import Optional
from typing import List

def human_size(bytes: int | float, units: Optional[List[str]] = None) -> str:
    """
    Convert bytes into a more human-friendly format

    :param bytes: int
        Number of bytes
    :param units: Optional[List[str]]
        units used
    :return: str
        Return size in human friendly format: <number> <size_unit>
    """
    if units is None:
        units = ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']
    return f"{round(bytes, 2)} " + units[0] if bytes < 1024 else human_size(bytes / 1024, units[1:])
9b652f0a09024c22dcefa5909c17f7b14d0183f4
4,895
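human_size recurses one unit per factor of 1024; some quick checks (assuming the function above is in scope):

assert human_size(512) == "512 bytes"
assert human_size(2048) == "2.0 KB"
assert human_size(5 * 1024 ** 3) == "5.0 GB"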
def convert_mg_l_to_mymol_kg(o2, rho_0=1025):
    """Convert oxygen concentrations in mg/l to mymol/kg (µmol/kg)."""
    converted = o2 * 1 / 32000 * rho_0 / 1000 * 1e6
    converted.attrs["units"] = r"$\mu mol/kg$"
    return converted
5925cf1f5629a0875bdc777bc3f142b9a664a144
4,900
def make_f_beta(beta):
    """Create a f beta function

    Parameters
    ----------
    beta : float
        The beta to use where a beta of 1 is the f1-score or F-measure

    Returns
    -------
    function
        A function to compute the f_beta score
    """
    beta_2 = beta ** 2
    coeff = (1 + beta_2)

    def f(global_, local_, node):
        """Compute the f-measure

        Parameters
        ----------
        global_ : np.array
            All of the scores for a given query
        local_ : np.array
            The scores for the query at the current node
        node : skbio.TreeNode
            The current node being evaluated
        """
        p = len(global_) / len(local_)
        r = len(local_) / node.ntips
        return coeff * (p * r) / ((beta_2 * p) + r)
    return f
f0e6993ac956171c58415e1605706c453d3e6d61
4,901
def lenzi(df):
    """Check if a pandas dataframe is empty"""
    return len(df.index) == 0
561705e6ff0da3bfb03407a721f2aff71a4d42a1
4,905
def num_to_int(num):
    """
    Checks that a numerical value (e.g. returned by robot) is an integer and
    not a float.

    Parameters
    ----------
    num : number to check

    Returns
    -------
    integer : num cast to an integer

    Raises
    ------
    ValueError : if num is not an integer
    """
    if num % 1 == 0:
        return int(num)
    else:
        raise ValueError('Expecting integer. Got: "{0}" ({1})'.format(num, type(num)))
af470940eb035fe8dd0160dfe9614c2b6d060194
4,906
def format_timedelta(tdelta):
    """Return the timedelta as a 'HH:mm:ss' string."""
    total_seconds = int(tdelta.total_seconds())
    hours, remainder = divmod(total_seconds, 60 * 60)
    minutes, seconds = divmod(remainder, 60)
    return "{0:02d}:{1:02d}:{2:02d}".format(hours, minutes, seconds)
852902e7972bcd13df8b60864ebcb2d75b2b259d
4,907
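A check for format_timedelta above; hours are not wrapped at 24, since divmod runs on the total seconds:

from datetime import timedelta
assert format_timedelta(timedelta(hours=1, minutes=2, seconds=3)) == "01:02:03"
assert format_timedelta(timedelta(days=1, seconds=3)) == "24:00:03"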
from typing import Tuple
import torch

def permute_adjacency_twin(t1, t2) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Makes a permutation of two adjacency matrices together.
    Equivalent to a renaming of the nodes.
    Supposes shape (n, n)
    """
    n, _ = t1.shape
    perm = torch.randperm(n)
    return t1[perm, :][:, perm], t2[perm, :][:, perm]
df3dc6507b8eae9d148ec9b2e664a427813d93a7
4,908
def five_fold(data_set):
    """[summary]

    Args:
        data_set (List of Sample objects): The Samples to be partitioned

    Returns:
        fold: where fold is list of len n in n-fold of (train, test)
        where train and test are lists of Samples
    """
    partition_index = int(len(data_set) / 5)
    s = 0
    fold = []
    for i in range(5):  # 0-4
        tr = data_set.copy()
        n = s + partition_index  # was -1
        te = tr[s:n]
        del tr[s:s + partition_index]
        fold.append((tr, te))
        s += partition_index
    return fold
d4179c238da3e9ebe05ab3513b80bcce982c8728
4,911
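A shape check for five_fold above (plain ints standing in for Sample objects):

data = list(range(100))
folds = five_fold(data)
assert len(folds) == 5
train, test = folds[1]
assert len(train) == 80 and len(test) == 20
assert test == data[20:40]  # each fold tests a consecutive fifth of the data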
def trim_filters(response):
    """Trim the leading and trailing zeros from a 1-D array or sequence, leaving
    one zero on each side. This is a modified version of numpy.trim_zeros.

    Parameters
    ----------
    response : 1-D array or sequence
        Input array.

    Returns
    -------
    first : int
        Index of the last leading zero.
    last : int
        Index of the first trailing zero.
    """
    first = 0
    for i in response:
        if i != 0.:
            if first == 0:
                first += 1  # to avoid filters with non-zero edges
            break
        else:
            first = first + 1

    last = len(response)
    for i in response[::-1]:
        if i != 0.:
            if last == len(response):
                last -= 1  # to avoid filters with non-zero edges
            break
        else:
            last = last - 1

    first -= 1
    last += 1
    return first, last
2582c5821bd5c8487c0f9d2f55d2d982767d2669
4,914
import bs4

def is_comment(obj):
    """Is comment."""
    return isinstance(obj, bs4.Comment)
e56749b3d5f95754a031cc7286229d942333a22e
4,919
import logging

def _VerifyOptions(options):
    """Verify the passed-in options.

    Args:
        options: The parsed options to verify.

    Returns:
        Boolean, True if verification passes, False otherwise.
    """
    if options.endpoints_service and not options.openapi_template:
        logging.error('Please specify openAPI template with --openapi_template '
                      'in deploying endpoints.')
        return False
    if options.openapi_template and not options.endpoints_service:
        logging.error('Please specify endpoints service with --endpoints_service '
                      'in deploying endpoints.')
        return False
    if (options.endpoints_service and
            options.project_id not in options.endpoints_service):
        logging.error('The project "%s" is not matched to the endpoints service '
                      '"%s".', options.project_id, options.endpoints_service)
        return False
    return True
872feb5ac314ed2ef28ddbfaeff1b5dafc5e9ed8
4,921
def parse_line(line):
    """Parse a queue trace line into a dict"""
    line = line.split()
    result = {}
    if len(line) < 12:
        return result
    result["event"] = line[0]
    result["time"] = float(line[1])
    result["from"] = int(line[2])
    result["to"] = int(line[3])
    result["type"] = line[4]
    result["size"] = int(line[5])
    result["flags"] = line[6]
    result["fid"] = int(line[7])
    result["src"] = line[8]
    result["dst"] = line[9]
    result["seqnum"] = int(line[10])
    result["pktid"] = int(line[11])
    return result
432e6a624626e89d27fe6d3d9ed7c4230d97c0a6
4,922
def get_census_params(variable_ids, county_level=False):
    """Gets census url params to make an API call.

    variable_ids: The ids of the variables to request. Automatically includes NAME.
    county_level: Whether to request at the county level, or the state level.
    """
    keys = variable_ids.copy()
    keys.append("NAME")
    params = {"get": ",".join(keys)}
    params["for"] = "county:*" if county_level else "state:*"
    return params
b24204c8e9ef82575b54151bdc0ac98de0fb7fc0
4,923
def lookupName(n, names):
    """Check if name is in list of names

    Parameters
    ----------
    n : str
        Name to check
    names : list
        List of names to check in

    Returns
    -------
    bool
        Flag denoting if name has been found in list (True) or not (False)
    """
    return n in names
0fbb97e252f5daf9de52a946c206fa74395b01c6
4,924
def calculate_appointments(new_set, old_set):
    """
    Calculate different appointment types.
    Used for making useful distinctions in the email message.
    new_set will be the fresh set of all available appointments at a given interval
    old_set will be the previous appointments variable getting passed in.

    Ex1: Addition of HONEOYE
        new_set = {'LIVERPOOL', 'BROOKLYN', 'HONEOYE', 'KINGSTON'}
        old_set = {'LIVERPOOL', 'BROOKLYN', 'KINGSTON'}
        returns -> new_appointments = {'HONEOYE'}
                   old_appointments = {'LIVERPOOL', 'BROOKLYN', 'KINGSTON'}

    Ex2: No Changes
        new_set = {'LIVERPOOL', 'BROOKLYN', 'HONEOYE', 'KINGSTON'}
        old_set = {'LIVERPOOL', 'BROOKLYN', 'HONEOYE', 'KINGSTON'}
        returns -> new_appointments = set() (empty set)
                   old_appointments = {'LIVERPOOL', 'BROOKLYN', 'HONEOYE', 'KINGSTON'}
    """
    # Set of all appointments minus set of old appointments yields the set of new appointments.
    new_appointments = new_set.difference(old_set)
    # New intersect old yields the appointments present in both sets (the overlap in a Venn diagram).
    old_appointments = new_set.intersection(old_set)
    return new_appointments, old_appointments
b54735293ba910e2b310e55e263e2611863d088a
4,925
import random

def rand_x_digit_num(x):
    """Return a random X digit number as a zero-padded string."""
    return '{0:0{x}d}'.format(random.randint(0, 10 ** x - 1), x=x)
b46864143ca6186ebeede6c687a85d1b585e70db
4,927
def filter_X_dilutions(df, concentration):
    """Select only one dilution ('high', 'low', or some number)."""
    assert concentration in ['high', 'low'] or type(concentration) is int
    df = df.sort_index(level=['CID', 'Dilution'])
    df = df.fillna(999)  # Pandas doesn't select correctly on NaNs
    if concentration == 'low':
        df = df.groupby(level=['CID']).first()
    elif concentration == 'high':
        df = df.groupby(level=['CID']).last()
    else:
        df = df.loc[[x for x in df.index if x[1] == concentration]]
        df = df.groupby(level=['CID']).last()
    df = df.replace(999, float('NaN'))  # Undo the fillna line above.
    return df
b886c87c1c5b96e6efc951ef197d3a0fb13707c1
4,930
def update_params(base_param: dict, additional: dict):
    """overwrite base parameter dictionary

    Parameters
    ----------
    base_param : dict
        base param dictionary
    additional : dict
        additional param dictionary

    Returns
    -------
    dict
        updated parameter dictionary
    """
    for key in additional:
        base_param[key] = additional[key]
    return base_param
e73581cb0b8d264343ead56da52c6dc12fe49dd7
4,931
import pathlib

def check_path(path: pathlib.Path) -> bool:
    """Check path."""
    return path.exists() and path.is_file()
2279dde6912ae6f6eb51d90ed5e71e0b3892fea9
4,933
def count(A, target):
    """Invoke recursive function to return number of times target appears in A."""
    def rcount(lo, hi, target):
        """Use recursion to count occurrences of target in A[lo:hi+1]."""
        if lo == hi:
            return 1 if A[lo] == target else 0
        mid = (lo + hi) // 2
        left = rcount(lo, mid, target)
        right = rcount(mid + 1, hi, target)
        return left + right
    return rcount(0, len(A) - 1, target)
79d9be64d332a11993f65f3c0deba8b4de39ebda
4,935
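A check of the divide-and-conquer counter above:

assert count([1, 2, 1, 3, 1], 1) == 3
assert count([1, 2, 3], 9) == 0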
def is_bool_type(typ):
    """Check if the given type is a bool."""
    if hasattr(typ, '__supertype__'):
        typ = typ.__supertype__
    return isinstance(typ, type) and issubclass(typ, bool)
3d8dfae184be330c8cbd7c0e7382311fef31ede5
4,936
def get_jmp_addr(bb):
    """
    @param bb: List of PseudoInstructions of one basic block
    @return: Address of jump instruction in this basic block
    """
    for inst in bb:
        if inst.inst_type == 'jmp_T':
            return inst.addr
    return None
13e69032bc7d6ed5413b5efbb42729e11661eab1
4,939
import torch

def predict_batch(model, x_batch, dynamics, fast_init):
    """
    Compute the softmax prediction probabilities for a given data batch.

    Args:
        model: EnergyBasedModel
        x_batch: Batch of input tensors
        dynamics: Dictionary containing the keyword arguments
            for the relaxation dynamics on u
        fast_init: Boolean to specify if fast feedforward
            initialization is used for the prediction

    Returns:
        Softmax classification probabilities for the given data batch
    """
    # Initialize the neural state variables
    model.reset_state()

    # Clamp the input to the test sample, and remove nudging from output
    model.clamp_layer(0, x_batch.view(-1, model.dimensions[0]))
    model.set_C_target(None)

    # Generate the prediction
    if fast_init:
        model.fast_init()
    else:
        model.u_relax(**dynamics)

    return torch.nn.functional.softmax(model.u[-1].detach(), dim=1)
61102cfa3bcb3e7d52e9f3eca8c97db4d726c1a7
4,944
def spot_centroid(regions):
    """Returns centroids for a list of regionprops.

    Args:
        regions (regionprops): List of region proposals (skimage.measure).

    Returns:
        list: Centroids of regionprops.
    """
    return [r.centroid for r in regions]
f53f403dddf0af123afd207e33cc06254a0f2538
4,952