content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
from typing import Dict
from typing import Pattern
import re


def get_xclock_hints() -> Dict[str, Pattern]:
    """Build the window-matching hints used to locate an xclock window.

    Returns:
        Dict[str, Pattern]: hint name mapped to a compiled regex.
    """
    hints: Dict[str, Pattern] = {"name": re.compile(r"^xclock$")}
    return hints
99e1fe51b46cb5e101c2a1c86cf27b2b60c0a38e
704,942
from datetime import datetime


def tzdt(fulldate: str):
    """Convert an ISO 8601 full UTC timestamp into an aware datetime.

    Parameters
    ----------
    fulldate: str
        ISO 8601 UTC timestamp, e.g. `2017-06-02T16:23:14.815Z`

    Returns
    -------
    :class:`datetime.datetime`
        Python datetime representing ISO timestamp.
    """
    # strptime's %z does not understand a bare "Z" suffix, so rewrite it
    # as an explicit +0000 offset first.
    if fulldate.endswith("Z"):
        fulldate = fulldate[:-1] + "+0000"
    return datetime.strptime(fulldate, "%Y-%m-%dT%H:%M:%S.%f%z")
e327c23f9aecf587432fa0170c8bcd3a9a534bd1
704,945
import math def lat2y(latitude): """ Translate a latitude coordinate to a projection on the y-axis, using spherical Mercator projection. :param latitude: float :return: float """ return 180.0 / math.pi * (math.log(math.tan(math.pi / 4.0 + latitude * (math.pi / 180.0) / 2.0)))
59a0a111c22c99dd23e80ed64d6355b67ecffd42
704,946
import random


def secure_randint(min_value, max_value, system_random=None):
    """Return a random integer N such that min_value <= N <= max_value.

    Uses random.SystemRandom (backed by os.urandom, i.e. /dev/urandom)
    unless a pre-built generator is supplied.
    """
    rng = system_random or random.SystemRandom()
    return rng.randint(min_value, max_value)
f4b61457c6e384e6185a5d22d95539001903670d
704,949
import math


def convert_weight(prob):
    """Convert a probability to a weight in a WFST (negative natural-log
    of 10, scaled by the probability)."""
    scale = -1.0 * math.log(10.0)
    return scale * float(prob)
d9f6c38fd2efa49ddd515878a0943f9c82d42e1a
704,951
def CalculateMediationPEEffect(PointEstimate2, PointEstimate3):
    """Calculate derived effects from a simple mediation model.

    Parameters
    ----------
    PointEstimate2 : array
        Parameter estimates for the regression of A on B; with no
        covariates this has length 1.
    PointEstimate3 : array
        Parameter estimates for the regression of A and B on C; with no
        covariates this has length 2.

    Returns
    -------
    IE : indirect effect (a * b)
    TE : total effect (IE + DE)
    DE : direct effect of A on C with B in the model (c')
    a  : effect of A on B
    b  : effect of B on C with A in the model
    """
    a = PointEstimate2[0]       # model of B with A: single beta, index 0
    b = PointEstimate3[1]       # model of C with A and B: b is index 1
    indirect = a * b
    direct = PointEstimate3[0]  # this is c'
    total = direct + indirect
    return indirect, total, direct, a, b
d2247985e46a78bc3333983e09a1030fd59f139d
704,957
def rgb_to_hex(red_component=None, green_component=None, blue_component=None):
    """Return color as #rrggbb for the given color tuple or components.

    TUPLE VERSION:     rgb_to_hex((128, 63, 96))
    COMPONENT VERSION: rgb_to_hex(64, 183, 22)
    """
    # Tuple form: the whole color arrives in the first argument.
    if isinstance(red_component, tuple):
        red_component, green_component, blue_component = red_component
    return f'#{red_component:02X}{green_component:02X}{blue_component:02X}'
37f5216f7f22f82072db6980541a815d87d02ef3
704,958
def int_to_binary(x, n):
    """Convert an integer into its binary representation.

    Args:
        x (int): input integer
        n (int): minimum width; result is left-padded with zeros to this

    Returns:
        (str) binary representation

    Raises:
        ValueError: if x is not an int (bools are rejected as well,
            matching the original strict ``type(x) != int`` check).
    """
    # isinstance is the idiomatic type check; bool is excluded explicitly
    # because bool subclasses int and the original rejected bools.
    if not isinstance(x, int) or isinstance(x, bool):
        raise ValueError('x must be an integer.')
    return format(x, 'b').zfill(n)
c3d68a798f84988290bd4e845a5bcc015872b054
704,963
def parse_commands(log_content):
    """
    parse cwl commands from the line-by-line generator of log file content
    and returns the commands as a list of command line lists, each corresponding
    to a step run.
    """
    # NOTE(review): log_content must be an iterator of lines. The loop ends
    # when next() yields a falsy value (e.g. ""); an exhausted iterator would
    # raise StopIteration instead -- confirm callers guarantee a terminating
    # falsy line.
    command_list = []
    command = []
    in_command = False
    line = next(log_content)
    while(line):
        line = line.strip('\n')
        # A "[job ...] ... docker \" line marks the start of a command block.
        if '[job' in line and line.endswith('docker \\'):
            line = 'docker \\' # remove the other stuff
            in_command = True
        if in_command:
            # Accumulate this continuation line, dropping the trailing
            # backslash and any trailing spaces.
            command.append(line.strip('\\').rstrip(' '))
            if not line.endswith('\\'):
                # No continuation marker: the command is complete -- flush it.
                in_command = False
                command_list.append(command)
                command = []
        line = next(log_content)
    return(command_list)
dff555cd0ec84619425fc05e4c8892c603bcc994
704,968
def to_geojson(series):
    """Return a GeoJSON GeometryCollection dict for `series`.

    The series must be in EPSG:4326. The pandas/geopandas built-in
    serializer is deliberately avoided since it introduces a lot of bloat.
    """
    geometries = series.apply(lambda geom: geom.__geo_interface__).to_list()
    return {"type": "GeometryCollection", "geometries": geometries}
2ebdc001ed7a6fb3ee6e6cac9fc7722e19518e20
704,971
def getConstraintWeightAttr(leader, constraint):
    """
    Return the weight attribute on `constraint` that corresponds to the
    given leader node, or None when the leader is not a target.

    Args:
        leader (PyNode): A node that is one of the leaders of a constraint
        constraint (PyNode): A constraint node
    """
    for index, target in enumerate(constraint.getTargetList()):
        if leader == target:
            # The alias list is parallel to the target list.
            return constraint.getWeightAliasList()[index]
e53ef981f505f1c8fc21fff7b71605764d6da3e0
704,972
import requests


def get_api_result(url):
    """Fetch `url` with an HTTP GET and return the decoded JSON payload."""
    session = requests.Session()
    response = session.get(url)
    return response.json()
933bd000b2e352f950ec86f8b6f1470ff2b0ecbd
704,975
def pyramid_sum(lower, upper, margin=0):
    """Sum the integers from lower to upper inclusive, printing an
    indented trace of each call's arguments and return value."""
    blanks = " " * margin
    print(blanks, lower, upper)  # trace the arguments
    if lower > upper:
        print(blanks, 0)  # trace the returned value
        return 0
    result = lower + pyramid_sum(lower + 1, upper, margin + 4)
    print(blanks, result)  # trace the returned value
    return result
751facb309f362c35257aab2b239a37b39a98a04
704,982
import unicodedata


def normalize_caseless(text):
    """Casefold `text` and normalize it to Unicode NFKD form.

    NFKD applies compatibility decomposition, replacing all compatibility
    characters with their equivalents.
    """
    folded = text.casefold()
    return unicodedata.normalize("NFKD", folded)
c26f8470ea6312cce7a97930999d489ee30eb692
704,983
def calculate_maximum_potential_edge_counts(channel_composition, N, max_ble_span):
    """Computes the maximum number of possible occurrences per potential edge type.

    Parameters
    ----------
    channel_composition : Dict[str, int]
        Channel composition description (wire type -> count); wire types
        whose name starts with 'V' are vertical, all others horizontal.
    N : int
        Number of BLEs in the cluster.
    max_ble_span : int
        Maximum BLE span in the pattern.

    Returns
    -------
    Dict[str, int]
        Maximum number of occurrences of each edge type, keyed by the
        edge's string name.
    """
    back_dir = {'L': 'R', 'R': 'L', 'U': 'D', 'D': 'U'}

    counts = {}
    for src_ble in range(0, N):
        for sink_ble in range(max(0, src_ble - max_ble_span),
                              min(N - 1, src_ble + max_ble_span) + 1):
            for w_src, src_count in channel_composition.items():
                src_dirs = ('U', 'D') if w_src[0] == 'V' else ('L', 'R')
                for src_dir in src_dirs:
                    for w_sink, sink_count in channel_composition.items():
                        sink_dirs = ('U', 'D') if w_sink[0] == 'V' else ('L', 'R')
                        for sink_dir in sink_dirs:
                            # A sink wire heading straight back against the
                            # source direction is not a valid edge.
                            if sink_dir == back_dir[src_dir]:
                                continue
                            # was: bare `except:` around a dict update, which
                            # swallowed every exception; dict.get makes the
                            # accumulation explicit and safe.
                            key = (w_src, w_sink)
                            inc = src_count * sink_count
                            counts[key] = counts.get(key, 0) + inc

    # Vertical wires carry a "_tap_0" suffix in the edge name.
    def e_str(e):
        return "potential_edge__%s%s__%s%s" % (
            e[0], "_tap_0" if e[0][0] == 'V' else '',
            e[1], "_tap_0" if e[1][0] == 'V' else '')

    return {e_str(e): counts[e] for e in counts}
55f891631bd109066735e9997cbb3dc35de8d21a
704,984
import json


def try_to_replace_line_json(line, json_type, new_json, json_prefix=""):
    """Attempt to replace a one-line JSON variable declaration in `line`.

    Parameters
    ----------
    line: str
        A line of JavaScript code. If it declares the targeted JSON
        (``var [prefix][name]JSON = {...};`` on a single line), everything
        from the ``{`` onward is replaced with the serialized `new_json`
        followed by ";\\n".
    json_type: str
        One of "rank", "sample", or "count"; anything else raises
        ValueError.
    new_json: dict
        Replacement JSON contents.
    json_prefix: str (default "")
        Optional prefix for the JSON variable name; when non-empty, only
        prefixed declarations are matched.

    Returns
    -------
    (line, replacement_made): str, bool
        The (possibly replaced) line and whether a replacement happened.
    """
    templates = {
        "rank": "var {}rankPlotJSON = {{",
        "sample": "var {}samplePlotJSON = {{",
        "count": "var {}countJSON = {{",
    }
    if json_type not in templates:
        raise ValueError(
            "Invalid json_type argument. Must be 'rank', "
            "'sample', or 'count'."
        )
    declaration = templates[json_type].format(json_prefix)
    if line.lstrip().startswith(declaration):
        head = line[: line.index("{")]
        return head + json.dumps(new_json, sort_keys=True) + ";\n", True
    return line, False
602897349b52be3f10a41cf90d211ad70a6d4cc2
704,988
def read(file_path, lines=False):
    """Return the contents of `file_path`: a single string by default,
    or a list of lines when `lines` is True."""
    with open(file_path, 'r') as handle:
        return handle.readlines() if lines else handle.read()
86b36dbc2792ac70bd9a71c74486643b3cdef690
704,989
def get_state(initial, input_value=None):
    """Build a fresh state dict seeded with `initial` and, optionally,
    a single `input_value`."""
    state = {
        'last_position': None,
        'initial': [initial],
        'input': [],
        'output': [],
    }
    if input_value is not None:
        state['input'] = [input_value]
    return state
7520341debf6b7287a445be1a44e51bd5675472f
704,993
def cell_snippet(x, is_date=False):
    """Create the proper Sheets cell snippet for the value's type.

    ints get an integer number format, floats a date or decimal format
    (depending on `is_date`), and everything else a plain string value.
    """
    if type(x) == int:
        number_format = {'type': 'NUMBER', 'pattern': '#,##0'}
    elif type(x) == float:
        if is_date:
            number_format = {'type': 'DATE', 'pattern': 'yyyy/mm/dd hh:mm:ss'}
        else:
            number_format = {'type': 'NUMBER', 'pattern': '#,##0.00'}
    else:
        return {'userEnteredValue': {'stringValue': x}}
    return {
        'userEnteredValue': {'numberValue': x},
        'userEnteredFormat': {'numberFormat': number_format},
    }
bc91279e5e9b4e9e6b853badf28081e0e4746549
704,995
def validate_index(n: int, ind: int, command: str):
    """
    Validate that index `ind` exists within the model repository.

    Args:
        n (int): length of indices
        ind (int): selected index
        command (str): command name for the "tailored" help message

    Raises:
        IndexError: when `ind` falls outside [-n, n).
    """
    if not -n <= ind < n:
        raise IndexError(f"Index {ind} does not exist... Run `kaos {command} list` again")
    return ind
3aef711caef041d2f4aa1dfdf0b5135d9f626b3c
705,002
import base64


def fix_string_attr(tfjs_node):
    """In place, re-encode legacy string attrs on a tfjs graph node.

    Older tfjs models store strings as lists of ints (byte values); this
    converts such lists to base64 text so protobuf can decode the json.
    """
    def encode(value):
        if isinstance(value, list):
            return base64.encodebytes(bytes(value)).decode()
        return value

    if 'attr' not in tfjs_node:
        return
    for attr in tfjs_node['attr'].values():
        if 's' in attr:
            attr['s'] = encode(attr['s'])
        if 'list' in attr and 's' in attr['list']:
            strings = attr['list']['s']
            for index, item in enumerate(strings):
                strings[index] = encode(item)
c137144fd9a42134451d2c49c93b20d562f1188b
705,013
def last_char(text: str, begin: int, end: int, chars: str) -> int:
    """Return the end index of `text[begin:end]` after trimming any
    trailing characters that appear in `chars` (never below `begin`)."""
    cursor = end
    while cursor > begin and text[cursor - 1] in chars:
        cursor -= 1
    return cursor
5d59cd50fb99593d5261513327b9799fc175cd6c
705,017
def shift_left_bit_length(x: int) -> int:
    """
    Shift 1 left by the bit length of (x - 1); for x >= 1 this yields
    the smallest power of two >= x.

    :param int x: value to get bit length
    :returns: 1 shifted left bit length of x
    """
    bits = (x - 1).bit_length()
    return 1 << bits
854e79309125c60c6e5975685078809fb4c016a4
705,029
from typing import List


def get_function_contents_by_name(lines: List[str], name: str):
    """
    Extracts a function from `lines` of segmented source code with the
    name `name`.

    Args:
        lines (`List[str]`):
            Source code of a script seperated by line.
        name (`str`):
            The name of the function to extract. Should be either
            `training_function` or `main`
    """
    if name not in ("training_function", "main"):
        raise ValueError(f"Incorrect function name passed: {name}, choose either 'main' or 'training_function'")
    collected, in_function = [], False
    for line in lines:
        if not in_function and f"def {name}" in line:
            in_function = True
            collected.append(line)
            continue
        if in_function:
            # Stop at the next known boundary for this function.
            if name == "training_function" and "def main" in line:
                return collected
            if name == "main" and "if __name__" in line:
                return collected
            collected.append(line)
60239b0063e83a71641d85194f72a9cc61221177
705,031
import json


def get_json(obj, indent=4):
    """Serialize `obj` to a formatted JSON string with sorted keys."""
    return json.dumps(obj, indent=indent, sort_keys=True)
be1376fcb9e820cc5012f694ca830ba0c52b5fef
705,033
def build_operation(id, path, args, command="set", table="block"):
    """
    Construct a single "operation" dict for the submitTransaction
    endpoint. A dotted-string `path` is split into its component list.
    """
    path_list = path.split(".") if isinstance(path, str) else path
    return {
        "id": id,
        "path": path_list,
        "args": args,
        "command": command,
        "table": table,
    }
74656a7568a6d705c9c24c091660b93d16977512
705,034
def compute_min_refills(distance: int, tank: int, stops: list):
    """Compute the minimum number of gas-station pit stops, or -1 when
    the destination cannot be reached.

    Examples:
        compute_min_refills(950, 400, [200, 375, 550, 750]) -> 2
        compute_min_refills(10, 3, [1, 2, 5, 9])            -> -1
        compute_min_refills(200, 250, [100, 150])           -> 0
    """
    positions = [0] + stops + [distance]
    last_stop = len(stops)
    cur_idx = 0
    refills, reached = 0, 0
    while cur_idx <= last_stop:
        prev_idx = cur_idx
        # Greedily advance as far as one tank allows.
        while (cur_idx <= last_stop
               and positions[cur_idx + 1] - positions[prev_idx] <= tank):
            cur_idx += 1
        reached = positions[cur_idx]
        if cur_idx == prev_idx:
            return -1  # next stop is out of range: destination unreachable
        if reached < distance:
            refills += 1
    return refills
41dff6085f3b46b191c40c3dde9b68ee3ee41e3e
705,036
def _upper(string): """Custom upper string function. Examples: foo_bar -> FooBar """ return string.title().replace("_", "")
04ad1596657736847e909e0c4937afc407ea1f60
705,038
import re


def escape_sql_string(string: str) -> str:
    """
    Escapes single quotes and backslashes with a backslash and wraps
    everything between single quotes.
    """
    escaped = re.sub(r"(['\\])", r"\\\1", string)
    return "'" + escaped + "'"
68f91b6a5c5bfcec6298f6b6f5c7dfb6b7a095f5
705,039
def _round_to_base(x, base=5): """Round to nearest multiple of `base`.""" return int(base * round(float(x) / base))
beccfe2951b9fcc7aafef57fd966418df1ce2cc1
705,040
from typing import Any


def ispointer(obj: Any) -> bool:
    """Check if a given obj is a pointer (is a remote object).

    An object counts as a pointer when its type name ends in "Pointer"
    and it exposes an `id_at_location` attribute.

    Args:
        obj (Any): Object.

    Returns:
        bool: True (if pointer) or False (if not).
    """
    type_name = type(obj).__name__
    return type_name.endswith("Pointer") and hasattr(obj, "id_at_location")
34bdf58b8352a11d878043ee2611d0b7c2a0dae5
705,042
def svm_predict(model, samples):
    """Run the trained SVM on `samples` and return the flattened responses."""
    responses = model.predict(samples)[1]
    return responses.ravel()
a510a64e602bbe14a3aa192cacd11b996704d91e
705,044
def ubatch_to_csv(batch):
    """
    Render a batch of APIUser dicts (each with a nested
    'permissions_dict') as CSV: a header row, then one row per user.
    Fields missing from a record fall back to its permissions dict.
    """
    permkey = 'permissions_dict'
    first = batch[0]
    fields = [key for key in first.keys() if key != permkey]
    fields.extend(first[permkey].keys())
    rows = []
    for record in batch:
        values = [str(record.get(field, record[permkey].get(field, None)))
                  for field in fields]
        rows.append(','.join(values))
    return '{}\n{}'.format(','.join(fields), '\n'.join(rows))
9950cb8e1f79f2cc37580142a125717e7e534de1
705,047
def guid_to_num(guid):
    """
    Convert a DHT guid to an integer.

    Args:
        guid: hexadecimal guid string (a legacy trailing 'L' is tolerated).

    Returns:
        The integer corresponding to the DHT guid given.
    """
    cleaned = guid.rstrip('L')
    return int(cleaned, 16)
7da3e7a60b6ae3410baab62083714f47a3afc790
705,048
import itertools


def gather_slice_list_items(slices, key):
    """Flatten the `key` lists from every slice dict that contains `key`."""
    per_slice = (s[key] for s in slices if key in s)
    return list(itertools.chain.from_iterable(per_slice))
068b511aefa124f9881f0d8cdc4d115b15922066
705,049
import textwrap


def dedent(text):
    """Strip the common leading whitespace from every line of `text`."""
    # Thin convenience wrapper around the stdlib implementation.
    return textwrap.dedent(text)
514f9f41feac1c19ff92d6c9258bf54d7d3d7bd8
705,056
def enumerate_square(i, n):
    """
    Bijectively map i in range(n^2 - n) onto (row, col) pairs with
    row in range(n), col in range(n), and col != row.
    """
    row, col = divmod(i, n - 1)
    row = int(row)
    col = int(col)
    # Skip the diagonal: shift columns at or past the row index up by one.
    if col >= row:
        col += 1
    return row, col
93d3465c88a7bc9952161524fded4d7250131a65
705,057
import math


def discounted_cumulative_gain(rank_list):
    """Return the running discounted cumulative gain of `rank_list`
    (gain at position i is discounted by log2(i + 1), 1-based)."""
    running = [rank_list[0]]
    for position in range(1, len(rank_list)):
        gain = rank_list[position] / math.log2(position + 1)
        running.append(running[-1] + gain)
    return running
eaa5ad6185e2abb239097be5399dffd82d143fd3
705,064
def format_dnb_company_investigation(data):
    """
    Format DNB company investigation payload to something
    DNBCompanyInvestigationSerlizer can parse: the telephone number is
    moved (in place) under 'dnb_investigation_data'.
    """
    telephone = data.pop('telephone_number', None)
    data['dnb_investigation_data'] = {'telephone_number': telephone}
    return data
9c27990bad98b36649b42c20796caabeaae1e21b
705,067
def calculate_relative_enrichments(results, total_pathways_by_resource):
    """Calculate relative enrichment of pathways
    (enriched pathways / total pathways).

    :param dict results: result enrichment
    :param dict total_pathways_by_resource: resource to number of pathways
    :rtype: dict
    """
    ratios = {}
    for resource, enriched_pathways in results.items():
        total = total_pathways_by_resource[resource]
        ratios[resource] = len(enriched_pathways) / total
    return ratios
7060e032f2a619929cfcf123cf0946d7965b86de
705,069
def load_targets_file(input_file):
    """
    Read `input_file` and return a list of its lines with newline and
    carriage-return characters removed.

    Precondition: input_file should exist in the file system.
    """
    with open(input_file, 'r') as handle:
        raw_lines = handle.readlines()
    return [line.replace('\n', '').replace('\r', '') for line in raw_lines]
40d305e244264d6c3249bb9fb914cda3ebcda711
705,072
def show_hidden_word(secret_word, old_letters_guessed):
    """
    :param secret_word: the word to be guessed
    :param old_letters_guessed: letters guessed so far
    :return: the word with every unguessed letter masked as ' _ '
    """
    return "".join(
        letter if letter in old_letters_guessed else ' _ '
        for letter in secret_word
    )
2b3618619dcde2875da9dc8600be334e7aaadaad
705,074
def filter_packages(packages: list, key: str) -> list:
    """Keep only the packages whose "category" equals `key`."""
    matching = []
    for package in packages:
        if package["category"] == key:
            matching.append(package)
    return matching
46f11f5a8269eceb9665ae99bdddfef8c62295a2
705,075
def _column_number_to_letters(number): """ Converts given column number into a column letters. Right shifts the column index by 26 to find column letters in reverse order. These numbers are 1-based, and can be converted to ASCII ordinals by adding 64. Parameters ---------- number : int Column number to convert to column letters. Returns ------- unicode Column letters. References ---------- :cite:`OpenpyxlDevelopers2019` Examples -------- # Doctests skip for Python 2.x compatibility. >>> _column_number_to_letters(128) # doctest: +SKIP 'DX' """ assert 1 <= number <= 18278, ( 'Column number {0} must be in range [1, 18278]!'.format(number)) letters = [] while number > 0: number, remainder = divmod(number, 26) if remainder == 0: remainder = 26 number -= 1 letters.append(chr(remainder + 64)) return ''.join(reversed(letters))
c9a68bcd32c8f254af322bc61e447cfae61cb6d2
705,080
def smallest_evenly_divisible(min_divisor, max_divisor, minimum_dividend=0):
    """Returns the smallest number that is evenly divisible (divisible
    with no remainder) by all of the numbers from `min_divisor` to
    `max_divisor`. If a `minimum_dividend` is provided, only dividends
    greater than or equal to this number will be evaluated.

    BUG FIX: the original built `range(max_divisor, 0, -min_divisor)`,
    which (for min_divisor > 1) used min_divisor as a *step*, silently
    skipping required divisors and testing values below min_divisor —
    e.g. (2, 5, 1) returned 15 instead of 60.
    """
    divisors = range(min_divisor, max_divisor + 1)
    candidate = minimum_dividend
    while True:
        if all(candidate % divisor == 0 for divisor in divisors):
            return candidate
        candidate += 1
fa23d9a413a0909bfc05d7eb928aec8ade4cb06f
705,082
def factorial(n):
    """
    Returns the factorial of n

    Parameters
    ----------
    n : int
        denotes the non-negative integer for which factorial value is
        needed

    Raises
    ------
    NotImplementedError
        If n is negative (kept for backward compatibility with callers).
    """
    if n < 0:
        raise NotImplementedError(
            "Enter a valid non-negative integer"
        )
    product = 1
    for factor in range(2, n + 1):
        product *= factor
    return product
fe0b7100e1292d1e96daf18545d9fdfb931f9f74
705,083
def Divide(a, b):
    """Returns the quotient a / b, or NaN if the divisor is zero."""
    return float('nan') if b == 0 else a / float(b)
3ed0b07949bb802177e52bf8d04e9dfde92ab2de
705,084
import ntpath


def path_leaf(path):
    """
    Extract the file name from a path, tolerating a trailing slash (in
    which case the basename of the head is returned instead).

    Parameters
    ----------
    path : str
        Path of the file

    Returns
    -------
    output : str
        The name of the file
    """
    head, tail = ntpath.split(path)
    if tail:
        return tail
    return ntpath.basename(head)
58930f081c2366b9084bb279d1b8b267e5f93c96
705,086
def focus_metric(data, merit_function='vollath_F4', **kwargs):
    """Compute the focus metric.

    Computes a focus metric on `data` using the supplied merit function,
    given either as a callable or as the name of a function defined in
    this module. Extra keyword arguments are forwarded to it.

    Args:
        data (numpy array) -- 2D array to calculate the focus metric for.
        merit_function (str/callable) -- merit function name or callable.

    Returns:
        scalar: result of calling merit function on data
    """
    if isinstance(merit_function, str):
        name = merit_function
        if name not in globals():
            raise KeyError(
                "Focus merit function '{}' not found in panoptes.utils.images!".format(name))
        merit_function = globals()[name]
    return merit_function(data, **kwargs)
c8f571e11202d39d8f331fca5fc93333aeb71e62
705,087
def get_canonical_import(import_set):
    """Obtain one single import from a set of possible sources of a symbol.

    The candidates are sorted alphabetically, then (stably) re-sorted so
    imports containing 'lite' move to the end — giving preference to
    imports coming from main tensorflow code.

    Args:
      import_set: (set) Imports providing the same symbol

    Returns:
      A module name to import
    """
    ordered = sorted(import_set)
    ordered.sort(key=lambda name: 'lite' in name)
    return ordered[0]
ae53ca4d271ab543a7a13f1ce8240ce6eb328bbb
705,088
import torch from typing import Iterable def _tensor_in(tensor: torch.Tensor, iterable: Iterable[torch.Tensor]): """Returns whether `tensor is element` for any element in `iterable` This function is necessary because `tensor in iterable` does not work reliably for `Tensor`s. See https://discuss.pytorch.org/t/how-to-judge-a-tensor-is-in-a-list/15998/4 for further discussion. """ return any(tensor is elem for elem in iterable)
84ac8a129440c9c8d7785029b04bd403514a3bb9
705,089
def is_development_mode(registry):
    """
    Returns true, if mode is set to development in current ini file.

    :param registry: request.registry
    :return: Boolean
    """
    settings = registry.settings
    if 'mode' not in settings:
        return False
    return settings['mode'].lower() == 'development'
af1b11fa69231a455406247b593f8ff49855bc3f
705,090
def handle_internal(msg):
    """Process an internal message by dispatching to its registered
    handler; returns None when no handler exists for this sub-type."""
    internal = msg.gateway.const.Internal(msg.sub_type)
    handler = internal.get_handler(msg.gateway.handlers)
    if handler is not None:
        return handler(msg)
    return None
0f5cae49cf5d36a5e161f88902c46af931fd622a
705,092
import math


def conv_float2negexp(val):
    """Returns the least restrictive negative exponent of the power 10
    that would achieve the floating point convergence criterium `val`."""
    exponent = math.floor(math.log(val, 10))
    return -1 * int(exponent)
562ccf7d34f8034a25cabfb471e7fc2ab9c0feb6
705,096
import random def img_get_random_patch(img,w,h): """Get a random patch of a specific width and height from an image""" # Note that for this function it is the user's responsibility to ensure # the image size is big enough. We'll do an asertion to help but... # Figure out the maximum starting point within the image that max_x = img.shape[1] - w max_y = img.shape[0] - h # Make sure the size is big enough assert max_x >= 0, 'Trying to get a patch wider that the image width' assert max_y >= 0, 'Trying to get a patch higher that the image height' # Get a random starting point x = random.randint(0,max_x) y = random.randint(0,max_y) # Get the patch within the image image_patch = img[y:y+h,x:x+w, ...] # All done return image_patch
41ce199eb5ab8eb136f740eb2e1b495226510690
705,098
def splinter_remote_url(request):
    """Remote webdriver url.

    :return: URL of remote webdriver.
    """
    option = request.config.option
    return option.splinter_remote_url
17bf9bf3ebd7296a2305fe9edeb7168fbca7db10
705,102
def pad(value, digits, to_right=False):
    """Zero-pad a positive binary-number string to `digits` characters.

    Pads on the left by default, or on the right with the `to_right`
    flag. Raises an exception if `value` is longer than `digits`.

    Example:
    pad('0010', 6)       -> '000010'
    pad('0010', 6, True) -> '001000'
    """
    assert len(value) <= digits
    fill = "0" * (digits - len(value))
    return value + fill if to_right else fill + value
98476653ccafeba0a9d81b9193de0687dbf9d85c
705,104
def check_band_below_faint_limits(bands, mags):
    """
    Check if a star's magnitude for a certain band is below the faint
    limit for that band.

    Parameters
    ----------
    bands : str or list
        Band(s) to check (e.g. ['SDSSgMag', 'SDSSiMag'].
    mags : float or list
        Magnitude(s) corresponding to the band(s) in `bands`.

    Returns
    -------
    list : a new list of bands that are above the faint limit
        (ie - use-able bands)
    """
    if isinstance(bands, str):
        bands = [bands]
    if isinstance(mags, float):
        mags = [mags]
    # Per-band faint limits, checked in the same precedence order as the
    # original elif chain.
    faint_limits = (('SDSSgMag', 24), ('SDSSrMag', 24),
                    ('SDSSiMag', 23), ('SDSSzMag', 22))
    usable = []
    for band, mag in zip(bands, mags):
        limit = next((lim for key, lim in faint_limits if key in band), None)
        if limit is not None and mag >= limit:
            continue
        usable.append(band)
    return usable
9e26fcef5bf79b4480e93a5fe9acd7416337cf09
705,106
def add_license_creation_fields(license_mapping):
    """
    Return an updated ``license_mapping`` of license data adding the
    license status fields needed for license creation (in place).
    """
    creation_defaults = {
        'is_active': False,
        'reviewed': False,
        'license_status': "NotReviewed",
    }
    license_mapping.update(creation_defaults)
    return license_mapping
3856c434a672150c09af4b5e4c7fd9fa55014d5c
705,108
def dequote(s):
    """
    from: http://stackoverflow.com/questions/3085382/python-how-can-i-strip-first-and-last-double-quotes

    If a string has single or double quotes around it, remove them.
    Make sure the pair of quotes match. If a matching pair of quotes is
    not found, return the string unchanged.
    """
    # Guard: strings shorter than 2 characters cannot carry a *pair* of
    # quotes. The original indexed s[0]/s[-1] unconditionally, crashing on
    # "" and reducing a lone quote character to "".
    if len(s) < 2:
        return s
    if (s[0] == s[-1]) and s.startswith(("'", '"')):
        return s[1:-1]
    return s
41c5e5fed901d70472dd6eef1ada7d53d395002c
705,113
from typing import Tuple


def requests_per_process(process_count: int, conf) -> Tuple[int, int]:
    """Divides how many requests each forked process will make."""
    concurrency_share = int(conf.concurrency / process_count)
    requests_share = int(conf.requests / process_count)
    return concurrency_share, requests_share
00af7a63471201c3fffcfb610f74a745ca326b68
705,120
def mergeSort(nums):
    """Merge sort: return a new sorted list (stable, O(n log n)).

    The input list itself is not modified. (Translated from the original
    Chinese docstring; leftover debug print() calls that dumped both
    halves on every merge have been removed.)
    """
    if len(nums) <= 1:
        return nums
    mid = len(nums) // 2
    left_nums = mergeSort(nums[:mid])
    right_nums = mergeSort(nums[mid:])

    left_pointer, right_pointer = 0, 0
    result = []
    # Merge the two sorted halves; <= keeps the sort stable.
    while left_pointer < len(left_nums) and right_pointer < len(right_nums):
        if left_nums[left_pointer] <= right_nums[right_pointer]:
            result.append(left_nums[left_pointer])
            left_pointer += 1
        else:
            result.append(right_nums[right_pointer])
            right_pointer += 1
    result += left_nums[left_pointer:]
    result += right_nums[right_pointer:]
    return result
708166485cf3e916bbde12edec7057c404ee830d
705,122
import json


def generate_api_queries(input_container_sas_url, file_list_sas_urls,
                         request_name_base, caller):
    """
    Generate .json-formatted API input from input parameters.

    file_list_sas_urls is a list of SAS URLs to individual file lists
    (all relative to the same container).

    request_name_base is a request name for the set; if the base name is
    'blah', individual requests will get request names of 'blah_chunk000',
    'blah_chunk001', etc. (only when more than one URL is supplied).

    Returns both strings and Python dicts:
    return request_strings, request_dicts
    """
    assert isinstance(file_list_sas_urls, list)

    request_dicts = []
    request_strings = []
    chunked = len(file_list_sas_urls) > 1
    for index, file_list_url in enumerate(file_list_sas_urls):
        request_name = request_name_base
        if chunked:
            request_name += '_chunk{0:0>3d}'.format(index)
        request = {
            'input_container_sas': input_container_sas_url,
            'images_requested_json_sas': file_list_url,
            'request_name': request_name,
            'caller': caller,
        }
        request_dicts.append(request)
        request_strings.append(json.dumps(request, indent=1))
    return request_strings, request_dicts
fa6ba9bbbfa26af9a7d1c6e6aa03d0e53e16f630
705,123
def crop_image(image, crop_box):
    """Crop image.

    # Arguments
        image: Numpy array.
        crop_box: List of four ints [row_start, col_start, row_end, col_end].

    # Returns
        Numpy array.
    """
    row_start, col_start, row_end, col_end = crop_box
    return image[row_start:row_end, col_start:col_end, :]
03ddb9927b82ddfe3ab3a36ec3329b5a980fe209
705,126
import warnings


def reorder(names, faname):
    """Format a BibTeX author string and return it.

    Adapted from one of the `customization` functions in `bibtexparser`.

    INPUT:
    names -- string of names in BibTeX style, i.e.
             "Last, First Middle and Last, First Middle and ..."
    faname -- initialized name of the author to highlight, or None

    OUTPUT:
    nameout -- formatted string, e.g. "F.M. Last, F.M. Last, and F.M. Last"
    """
    # Tag used to highlight the website owner's name in the output.
    my_name_format_tag = '**'

    # Break the author string at each "and " and drop stray whitespace.
    raw_names = [part.strip() for part in names.replace('\n', ' ').split("and ")]

    tidynames = []
    for raw in raw_names:
        raw = raw.strip()
        if not raw:
            # Skip accidental empty entries.
            continue
        # Prefer "Last, First" form; fall back to "First Last".
        pieces = raw.rsplit(',', 1)
        if len(pieces) == 1:
            pieces = raw.rsplit(' ', 1)
            last = pieces[-1].strip().strip('{}')
            firsts = pieces[:-1]
        else:
            last = pieces[0].strip().strip('{}')
            firsts = [f.strip().strip('.') for f in pieces[1].split()]
        # Initialize every first/middle name: first letter plus a period.
        initials = ''.join(f[0] + '.' for f in firsts)
        tidynames.append(initials + ' ' + last)

    # Highlight the site owner's name if it appears in the list.
    if faname is not None:
        try:
            idx = tidynames.index(faname)
            tidynames[idx] = my_name_format_tag + tidynames[idx] + my_name_format_tag
        except ValueError:
            warnings.warn("Couldn't find {} in the names list. Sorry!".format(faname))

    # Join according to the number of authors.
    if len(tidynames) > 2:
        tidynames[-1] = 'and ' + tidynames[-1]
        return ', '.join(tidynames)
    if len(tidynames) == 2:
        tidynames[-1] = 'and ' + tidynames[-1]
        return ' '.join(tidynames)
    return ''.join(tidynames)
4012add188a3497b582078d7e7e05eeafc95252f
705,127
def array_xy_offsets(test_geo, test_xy):
    """Return array offsets of ``test_xy`` within the grid of ``test_geo``.

    Args:
        test_geo: GDAL geotransform (6-tuple) used to calculate the offset.
        test_xy: x/y coordinates in the same projection as ``test_geo``,
            passed as a list or tuple.

    Returns:
        Tuple ``(x_offset, y_offset)`` giving the upper-left array indices.
    """
    origin_x, pixel_width = test_geo[0], test_geo[1]
    origin_y, pixel_height = test_geo[3], test_geo[5]
    x_offset = int((test_xy[0] - origin_x) / pixel_width)
    y_offset = int((test_xy[1] - origin_y) / pixel_height)
    return x_offset, y_offset
5fa67b7df833459f3fc59951a056316f249acc69
705,128
def scatterList(z):
    """Reshape the N-vortex ODE solution vector ``z`` for easy 2-D plotting.

    Returns a pair of lists: the even-indexed entries (x coordinates) and
    the odd-indexed entries (y coordinates) of the first 2*(len(z)//2)
    components.
    """
    half = len(z) // 2
    xs, ys = [], []
    for j in range(half):
        xs.append(z[2 * j])
        ys.append(z[2 * j + 1])
    return xs, ys
422bf448ae999f56e92fdc81d05700189122ad0e
705,135
def rgb_to_hex_string(value):
    """Convert an (R, G, B) tuple to a hex color string.

    :param value: The RGB value to convert
    :type value: tuple

    R, G and B should be floats in the range 0.0 - 1.0.
    """
    channels = ['%02x' % int(component * 255) for component in value]
    return '#' + ''.join(channels)
6449d5ecf8f3134ca320c784293d8ece44a84148
705,138
def _rgb_to_hex_string(rgb: tuple) -> str: """Convert RGB tuple to hex string.""" def clamp(x): return max(0, min(x, 255)) return "#{0:02x}{1:02x}{2:02x}".format(clamp(rgb[0]), clamp(rgb[1]), clamp(rgb[2]))
eafd166a67ac568cfad3da1fa16bdfcd054a914a
705,139
import torch
import copy


def clones(module, N):
    """Produce a ModuleList holding ``N`` independent deep copies of ``module``."""
    copies = [copy.deepcopy(module) for _ in range(N)]
    return torch.nn.ModuleList(copies)
2def7cf89def4d598253ca48cb04e670ecb54dfd
705,142
import math


def bl2xy(lon: float, lat: float):
    """
    CGCS2000 datum: convert geodetic longitude/latitude to projected plane
    coordinates in a 3-degree Gauss-Krueger zone.

    Param:
        lon (float): longitude in degrees
        lat (float): latitude in degrees

    Returns:
        (x, y): x corresponds to longitude (easting), y to latitude (northing)
    """
    # pi / 180 (degrees -> radians)
    iPI = 0.0174532925199433
    # 3-degree zone width
    zoneWide = 3
    # semi-major axis (metres)
    a = 6378137
    # flattening
    f = 1/298.257222101
    # zone number and its central meridian (in radians)
    projNo = int(lon/zoneWide)
    longitude0 = projNo*3
    longitude0 = longitude0 * iPI
    # input coordinates in radians
    longitude1 = lon * iPI
    latitude1 = lat * iPI
    # first eccentricity squared
    e2 = 2 * f - f * f
    # auxiliary eccentricity term used below
    ee = e2 * (1.0 - e2)
    # radius of curvature in the prime vertical
    NN = a / math.sqrt(1.0 - e2 * math.sin(latitude1) * math.sin(latitude1))
    T = math.tan(latitude1) * math.tan(latitude1)
    C = ee * math.cos(latitude1) * math.cos(latitude1)
    A = (longitude1 - longitude0) * math.cos(latitude1)
    # meridian arc length from the equator (series expansion)
    M = a * ((1 - e2 / 4 - 3 * e2 * e2 / 64 - 5 * e2 * e2 * e2 / 256) * latitude1 - (3 * e2 / 8 + 3 * e2 * e2 / 32 + 45 * e2 * e2 * e2 / 1024) * math.sin(2 * latitude1) + (15 * e2 * e2 / 256 + 45 * e2 * e2 * e2 / 1024) * math.sin(4 * latitude1) - (35 * e2 * e2 * e2 / 3072) * math.sin(6 * latitude1))
    # easting/northing relative to the central meridian
    xval = NN * (A + (1 - T + C) * A * A * A / 6 + (5 - 18 * T + T * T + 72 * C - 58 * ee) * A * A * A * A * A / 120)
    yval = M + NN * math.tan(latitude1) * (A * A / 2 + (5 - T + 9 * C + 4 * C * C) * A * A * A * A / 24 + (61 - 58 * T + T * T + 600 * C - 330 * ee) * A * A * A * A * A * A / 720)
    # false easting: zone number * 1e6 plus 500 km; no false northing
    X0 = 1000000 * projNo + 500000
    Y0 = 0
    xval = xval + X0
    yval = yval + Y0
    return (xval, yval)
4f2166d7878998da5373a4fa6aff5fcee6f32c61
705,143
def build_scoring_matrix(alphabet, diag_score, off_diag_score, dash_score):
    """Build a scoring matrix over ``alphabet`` plus the dash character.

    Takes as input a set of characters ``alphabet`` and three scores.
    Returns a dictionary of dictionaries whose entries are indexed by
    pairs of characters drawn from ``alphabet`` plus ``'-'``:

    * any entry indexed by one or more dashes scores ``dash_score``;
    * the remaining diagonal entries score ``diag_score``;
    * the remaining off-diagonal entries score ``off_diag_score``.

    The input set is left unmodified (previously it was mutated in place
    by adding ``'-'``).
    """
    # Work on a copy so the caller's set is not mutated.
    full_alphabet = set(alphabet) | {'-'}
    scoring_matrix = {}
    for first_ltr in full_alphabet:
        row = {}
        for sec_ltr in full_alphabet:
            if first_ltr == '-' or sec_ltr == '-':
                row[sec_ltr] = dash_score
            elif first_ltr == sec_ltr:
                row[sec_ltr] = diag_score
            else:
                row[sec_ltr] = off_diag_score
        scoring_matrix[first_ltr] = row
    return scoring_matrix
703c3ef7fb6899a46a26d55dae740705b6953adb
705,146
import struct


def set_real(bytearray_: bytearray, byte_index: int, real) -> bytearray:
    """Write a Real value (IEEE 754 binary32, big-endian) into a buffer.

    Notes:
        Datatype `real` is represented in 4 bytes in the PLC.

    Args:
        bytearray_: buffer to write to.
        byte_index: byte index to start writing from.
        real: value to be written.

    Returns:
        Buffer with the value written.

    Examples:
        >>> data = bytearray(4)
        >>> snap7.util.set_real(data, 0, 123.321)
        bytearray(b'B\\xf6\\xa4Z')
    """
    packed = struct.pack('>f', float(real))
    # Copy the four packed bytes into the buffer one by one.
    for offset, value in enumerate(packed):
        bytearray_[byte_index + offset] = value
    return bytearray_
bda32caab27adeae7c6710d4c26743b93533ccff
705,150
def get_shape(obj):
    """Return the shape of a :code:`numpy.ndarray` or of a nested list.

    Parameters:
        obj: The object of which to determine the shape.

    Returns:
        A tuple describing the shape of the :code:`ndarray` or the nested
        list, or the empty tuple :code:`()` if ``obj`` is neither (i.e. a
        scalar).  An empty list yields :code:`(0,)`.
    """
    if hasattr(obj, "shape"):
        # numpy arrays (and anything array-like) expose their shape directly.
        return obj.shape
    if isinstance(obj, list):
        if not obj:
            return (0,)
        # Assumes rectangular nesting: recurse on the first element only.
        return (len(obj),) + get_shape(obj[0])
    return ()
d02d755f4b9e4a4dbde6c87ddfe0b5729a8c158e
705,152
from typing import Any
from typing import List


def as_list(x: Any) -> List[Any]:
    """Wrap argument into a list if it is not iterable.

    :param x: a (potential) singleton to wrap in a list.
    :returns: [x] if x is not iterable and x if it is.
    """
    # Strings are iterable but treated as scalars here.
    if isinstance(x, str):
        return [x]
    try:
        iter(x)
    except TypeError:
        return [x]
    return x
4b1b26857d209a9f5b142908e3a35b1ce7b05be4
705,153
import re


def remove_url(text):
    """Strip URLs from the given text.

    :param text: text to transform
    :return: transformed text with every ``http...`` token removed
    """
    url_pattern = re.compile(r'http\S+')
    return url_pattern.sub('', text)
d0f3716808863d5e868da1efc4a7bb16ffa47ac1
705,155
def print_subheader(object_type):
    """Return a decorated subheader banner for a text file."""
    return (
        "\n"
        "#################################################################\n"
        "# {0}\n"
        "#################################################################\n"
    ).format(object_type)
1ea7185f024ec7dc45a1ccac9f7e2feb6a2a6bf2
705,158
import time


def condor_tables(sqlContext,
                  hdir='hdfs:///project/monitoring/archive/condor/raw/metric',
                  date=None, verbose=False):
    """Parse HTCondor records stored on HDFS into a Spark DataFrame.

    Each record is a JSON document whose ``data`` attribute carries the
    HTCondor ClassAd fields (GlobalJobId, Site, WallClockHr, CRAB_* ...)
    and whose ``metadata`` attribute carries transport information.

    :param sqlContext: Spark SQL context used to read the JSON records
    :param hdir: base HDFS directory holding the raw condor metrics
    :param date: day to read, formatted ``YYYY/MM/DD``; defaults to yesterday
    :param verbose: unused verbosity flag (kept for API compatibility)
    :returns: dict mapping table name to DataFrame, i.e. ``{'condor_df': df}``
    """
    if not date:
        # By default we read yesterday's partition (records laid out by day).
        date = time.strftime("%Y/%m/%d", time.gmtime(time.time() - 60 * 60 * 24))
    hpath = '%s/%s' % (hdir, date)

    # Create a new Spark DataFrame from the day's JSON records and register
    # it for SQL queries under the name `condor_df`.
    condor_df = sqlContext.read.json(hpath)
    condor_df.registerTempTable('condor_df')
    condor_df.printSchema()
    return {'condor_df': condor_df}
8742f240f65755431a5b640e9481f657ce3048d5
705,159
from typing import Callable from typing import List from typing import Tuple def _contiguous_groups( length: int, comparator: Callable[[int, int], bool] ) -> List[Tuple[int, int]]: """Splits range(length) into approximate equivalence classes. Args: length: The length of the range to split. comparator: Determines if two indices have approximately equal items. Returns: A list of (inclusive_start, exclusive_end) range endpoints. Each corresponds to a run of approximately-equivalent items. """ result = [] start = 0 while start < length: past = start + 1 while past < length and comparator(start, past): past += 1 result.append((start, past)) start = past return result
fc25e286a2b6ec9ab7de15146e8b26922ea56e6b
705,160
def cleanRepl(matchobj):
    """Clean a directory-name fragment for use in a matplotlib title.

    Replaces backslashes with forward slashes and escapes underscores
    (which LaTeX would otherwise render as subscripts); forward slashes
    pass through and anything else is dropped.
    """
    token = matchobj.group(0)
    replacements = {
        r'\\': '/',
        r'_': r'\_',
        r'/': '/',
    }
    return replacements.get(token, '')
ffe9abb42df66780134e058ad24457a75f873055
705,162
def scale_range(x, x_range, y_range=(0.0, 1.0)):
    """Linearly map ``x`` from ``x_range`` onto ``y_range``.

    :param x: the number to scale
    :type x: float
    :param x_range: the (min, max) range that x belongs to
    :type x_range: tuple
    :param y_range: the (min, max) range to convert x to, defaults to (0.0, 1.0)
    :type y_range: tuple
    :return: the scaled value
    :rtype: float
    """
    x_min, x_max = x_range
    y_min, y_max = y_range
    fraction = (x - x_min) / (x_max - x_min)
    return y_min + (y_max - y_min) * fraction
3e2f5185f1565d70e8d1d699f3b5b1e00d375e21
705,167
def guardian_join(team):
    """Return all parent guardians on the team joined into one comma-separated string."""
    all_guardians = [
        guardian
        for player in team['team_players']
        for guardian in player['guardians']
    ]
    return ", ".join(all_guardians)
5b9c7908598a65bb5e465fae13258de99fbf8597
705,168
from typing import ByteString


def is_prefix_of(prefix: ByteString, label: ByteString) -> bool:
    """Whether ``label`` starts with ``prefix``."""
    if len(prefix) > len(label):
        return False
    # Compare element-wise; zip stops at the (shorter) prefix length.
    return all(a == b for a, b in zip(prefix, label))
6be10ca432876f7847e2f8513e5205a9ae4d3c16
705,169
def get_worker_list(AnnotationSet):
    """Return the worker IDs, i.e. every dataframe column after the first."""
    columns = list(AnnotationSet.dataframe.columns)
    return columns[1:]
0f18afa4bb70360e03a1a65c1ab3a5b4bbba7e38
705,171
from typing import Counter
import math


def sentence_bleu(hypothesis, reference, smoothing=True, order=4, **kwargs):
    """
    Compute sentence-level BLEU score between a translation hypothesis and
    a reference.

    :param hypothesis: list of tokens or token ids
    :param reference: list of tokens or token ids
    :param smoothing: apply smoothing (recommended, especially for short
        sequences)
    :param order: count n-grams up to this value of n.
    :param kwargs: additional (unused) parameters
    :return: BLEU score (float)
    """
    if len(hypothesis) == 0:
        return 0

    log_score = 0
    for i in range(order):
        # n-gram counts (n = i + 1) for hypothesis and reference.
        hyp_ngrams = Counter(zip(*[hypothesis[j:] for j in range(i + 1)]))
        ref_ngrams = Counter(zip(*[reference[j:] for j in range(i + 1)]))

        numerator = sum(min(count, ref_ngrams[ngram]) for ngram, count in hyp_ngrams.items())
        denominator = sum(hyp_ngrams.values())

        if smoothing:
            numerator += 1
            denominator += 1

        # Guard: without smoothing, a hypothesis shorter than n has no
        # n-grams at all (denominator == 0); treat that precision as 0
        # instead of raising ZeroDivisionError.
        score = numerator / denominator if denominator > 0 else 0

        if score == 0:
            log_score += float('-inf')
        else:
            log_score += math.log(score) / order

    # Brevity penalty for hypotheses shorter than the reference.
    bp = min(1, math.exp(1 - len(reference) / len(hypothesis)))
    return math.exp(log_score) * bp
e3913cebdfe58ca55aa9c02d9faab4d8fc9ef3dd
705,174
def needs_update(targ_capacity, curr_capacity, num_up_to_date):
    """Return whether there are more batch updates to do.

    Inputs are the target size for the group, the current size of the
    group, and the number of members that already have the latest
    definition.  No update is needed only when every current member is
    up to date and the group is already at its target size.
    """
    done = (num_up_to_date >= curr_capacity) and (curr_capacity == targ_capacity)
    return not done
77981f3fdb57296503f34b0ea955b68b9f98db4c
705,177
from pathlib import Path


def is_dir_exist(path):
    """Whether the directory exists"""
    return Path(path).is_dir()
8182e96399d2271bc8e3cd5c1a4201f3e2acd895
705,180
from datetime import datetime


def to_date(string, format="%d/%m/%Y"):
    """Converts a string to datetime

    :param string: String containing the date.
    :type string: str
    :param format: The date format. Use %Y for year, %m for months and %d
        for days, defaults to "%d/%m/%Y"
    :type format: str, optional
    :return: the parsed date
    :rtype: datetime.datetime
    """
    parsed = datetime.strptime(string, format)
    return parsed
83fa8e8a0cdfae9546c7a83e55ddcf84ec667646
705,184
import time


def nonce() -> str:
    """Return a nonce counter based on the monotonic clock (nanoseconds).

    References:
        * https://support.kraken.com/hc/en-us/articles/360000906023-What-is-a-nonce-
    """
    return str(time.monotonic_ns())
fb6221fef4c2c8af66200c4c9da8f6253854b186
705,194
import string


def base62_encode(number):
    """Encode a number in base62 (all digits + a-z + A-Z)."""
    base62chars = string.digits + string.ascii_letters
    encoded = []
    while number > 0:
        number, remainder = divmod(number, 62)
        encoded.append(base62chars[remainder])
    # Digits were produced least-significant first; zero maps to '0'.
    return ''.join(reversed(encoded)) or '0'
b1f10fe69b6263d54f2e00a32b8260cbb3c42747
705,196
import random


def random_swap(o_a, o_b):
    """
    Randomly swap elements of two observation vectors and return new vectors.

    :param o_a: observation vector a
    :param o_b: observation vector b
    :return: shuffled vectors
    """
    swapped_a, swapped_b = [], []
    coin = [True, False]
    for left, right in zip(o_a, o_b):
        # One random draw per position decides whether this pair swaps.
        if random.choice(coin):
            left, right = right, left
        swapped_a.append(left)
        swapped_b.append(right)
    return swapped_a, swapped_b
f243e91e5b281c682601fdb8df49bd7e6209274c
705,198
from functools import reduce


def gcd(numbers):
    """Return greatest common divisor of integer numbers.

    Using Euclid's algorithm.

    Examples
    --------
    >>> gcd([4])
    4
    >>> gcd([3, 6])
    3
    >>> gcd([6, 7])
    1
    """
    def euclid(a, b):
        # Pairwise GCD via Euclid's algorithm.
        while b:
            a, b = b, a % b
        return a

    return reduce(euclid, numbers)
da7ae2a24649bc05e233533735baf850a37dcc5a
705,199
def pair_keys_to_items(items, key):
    """
    Convert the list of key:value dicts (nics or disks) into a dict.

    The key of the new dict is each item's value at ``key``; when an item
    lacks that key, its position in the list is used instead.
    """
    return {item.get(key, index): item for index, item in enumerate(items)}
92c66bfbb298e767b3fedbfcfd48ad87ac1162ef
705,206
def empty_when_none(_string=None):
    """Return '' when ``_string`` is None, otherwise return ``str(_string)``."""
    return "" if _string is None else str(_string)
402186ee7b4ba9c3968f81bee23134067d0f260e
705,208
import hashlib


def file_hashes(f, bufsize=16000000):
    """
    Compute md5, sha1 and sha256 of a file object, reading in chunks so
    large files never need to fit in memory.

    Returns a 3-tuple of hex digest strings (md5, sha1, sha256).
    """
    hashers = (hashlib.md5(), hashlib.sha1(), hashlib.sha256())
    while True:
        chunk = f.read(bufsize)
        if not chunk:
            break
        for h in hashers:
            h.update(chunk)
    return tuple(h.hexdigest() for h in hashers)
4e23a0d99cda07325ba3a14675bfb515c12d2950
705,209
from typing import List
import json


def get_edfi_payloads(context, dbt_run_result, table_reference: str) -> List:
    """
    Download a BigQuery table and return its rows as a list of dicts.

    :param context: context exposing ``resources.warehouse``
    :param dbt_run_result: unused; kept for interface compatibility
    :param table_reference: table to download
    :return: table rows as JSON-compatible dicts
    """
    frame = context.resources.warehouse.download_table(table_reference)
    # Round-trip through JSON so dates become ISO strings and values are
    # plain Python types.
    records = frame.to_json(orient="records", date_format="iso")
    return json.loads(records)
c2ad0026ad4e56a256a824a4c1fae0762aaa51b7
705,211
def add_rnn_encoder_arguments(group):
    """Define arguments for RNN encoder."""
    # (flags, keyword arguments) for every encoder option.
    options = [
        (
            ("--elayers",),
            dict(
                default=4,
                type=int,
                help="Number of encoder layers (for shared recognition part "
                "in multi-speaker asr mode)",
            ),
        ),
        (
            ("--eunits", "-u"),
            dict(default=300, type=int, help="Number of encoder hidden units"),
        ),
        (
            ("--eprojs",),
            dict(default=320, type=int, help="Number of encoder projection units"),
        ),
        (
            ("--subsample",),
            dict(
                default="1",
                type=str,
                help="Subsample input frames x_y_z means subsample every x frame "
                "at 1st layer, every y frame at 2nd layer etc.",
            ),
        ),
    ]
    for flags, kwargs in options:
        group.add_argument(*flags, **kwargs)
    return group
64a65bd496402dedfe98c4bd0d5bbc516c87a398
705,212
import re


def validate_bucket_name(bucket_name):
    """
    Validate bucket name.

    Bucket name must be compatible with DNS name (RFC 1123):
    - Between 6 and 63 characters long (inclusive)
    - Valid character set [a-z0-9-]
    - Must begin and end with an alphanumeric character
      (so it can not begin or end with "-")

    Returns True if valid, False otherwise.
    """
    # Length gate: 6..63 characters inclusive.  (The docstring previously
    # claimed "less than 63 characters", which contradicted this check.)
    if len(bucket_name) < 6 or len(bucket_name) > 63:
        return False
    # First/last character must be alphanumeric; interior may include "-".
    # This also rules out names beginning or ending with a dash, so the
    # previous explicit startswith/endswith checks were redundant.
    pattern = re.compile("^[0-9a-z]([0-9a-z-]{0,61})[0-9a-z]$")
    return bool(pattern.match(bucket_name))
1d759408d097143b93b0af172bf8e73fe02e283a
705,215
def coerce_types(T1, T2): """Coerce types T1 and T2 to a common type. Coercion is performed according to this table, where "N/A" means that a TypeError exception is raised. +----------+-----------+-----------+-----------+----------+ | | int | Fraction | Decimal | float | +----------+-----------+-----------+-----------+----------+ | int | int | Fraction | Decimal | float | | Fraction | Fraction | Fraction | N/A | float | | Decimal | Decimal | N/A | Decimal | float | | float | float | float | float | float | +----------+-----------+-----------+-----------+----------+ Subclasses trump their parent class; two subclasses of the same base class will be coerced to the second of the two. """ # Get the common/fast cases out of the way first. if T1 is T2: return T1 if T1 is int: return T2 if T2 is int: return T1 # Subclasses trump their parent class. if issubclass(T2, T1): return T2 if issubclass(T1, T2): return T1 # Floats trump everything else. if issubclass(T2, float): return T2 if issubclass(T1, float): return T1 # Subclasses of the same base class give priority to the second. if T1.__base__ is T2.__base__: return T2 # Otherwise, just give up. raise TypeError('cannot coerce types %r and %r' % (T1, T2))
7d412df0182ca6e1f43bfc6ce8e7c6ce1a738bed
705,221
import itertools


def pad_ends(
    sequence, pad_left=True, left_pad_symbol="<s>", right_pad_symbol="</s>"
):
    """
    Pad sentence ends with start- and end-of-sentence tokens.

    In speech recognition, it is important to predict the end of sentence
    and use the start of sentence to condition predictions. Typically this
    is done by adding special tokens (usually <s> and </s>) at the ends of
    each sentence. The <s> token should not be predicted, so some special
    care needs to be taken for unigrams.

    Arguments
    ---------
    sequence : iterator
        The sequence (any iterable type) to pad.
    pad_left : bool
        Whether to pad on the left side as well. True by default.
    left_pad_symbol : any
        The token to use for left side padding. "<s>" by default.
    right_pad_symbol : any
        The token to use for right side padding. "</s>" by default.

    Returns
    -------
    generator
        A generator that yields the padded sequence.

    Example
    -------
    >>> for token in pad_ends(["Speech", "Brain"]):
    ...     print(token)
    <s>
    Speech
    Brain
    </s>
    """
    # Materialize the body once so the chain is restartable-safe.
    body = tuple(sequence)
    if pad_left:
        return itertools.chain((left_pad_symbol,), body, (right_pad_symbol,))
    return itertools.chain(body, (right_pad_symbol,))
e4a341d1e777adab36ec0c0e7996e23203c53478
705,223