Columns: content — string (35 to 416k chars) · sha1 — string (40 chars) · id — int64 (0 to 710k)
def is_fouling_team_in_penalty(event):
    """Returns True if the fouling team is over the limit, else False"""
    fouls_to_give_prior_to_foul = event.previous_event.fouls_to_give[event.team_id]
    return fouls_to_give_prior_to_foul == 0
ac1578af1092586a30b8fc9cdb3e5814da1f1544
707,440
import warnings


def lmc(wave, tau_v=1, **kwargs):
    r"""
    Pei 1992 LMC extinction curve.

    :param wave:
        The wavelengths at which optical depth estimates are desired.

    :param tau_v: (default: 1)
        The optical depth at 5500\AA, used to normalize the attenuation curve.

    :returns tau:
        The optical depth at each wavelength.
    """
    if (wave < 1e3).any():
        warnings.warn('LMC: extinction extrapolation below 1000AA is poor')
    mic = wave * 1e-4
    aa = [175., 19., 0.023, 0.005, 0.006, 0.020]
    ll = [0.046, 0.08, 0.22, 9.7, 18., 25.]
    bb = [90., 5.50, -1.95, -1.95, -1.80, 0.00]
    nn = [2.0, 4.5, 2.0, 2.0, 2.0, 2.0]

    abs_ab = mic * 0.
    norm_v = 0  # hack to go from tau_b to tau_v
    mic_5500 = 5500 * 1e-4

    for i, a in enumerate(aa):
        norm_v += aa[i] / ((mic_5500 / ll[i])**nn[i] +
                           (ll[i] / mic_5500)**nn[i] + bb[i])
        abs_ab += aa[i] / ((mic / ll[i])**nn[i] + (ll[i] / mic)**nn[i] + bb[i])

    return tau_v * (abs_ab / norm_v)
04c89605e8ad4188c62b631e173a9c8fe714958a
707,441
def minMax(xs):
    """Compute the minimum and maximum of an array of values xs (non-empty!)."""
    lo, hi = xs[0], xs[0]
    for x in xs[1:]:
        if x < lo:
            lo = x
        elif x > hi:
            hi = x
    return lo, hi
8453b71e5b62592f38f4be84f4366fb02bd0171b
707,442
def compute_prefix_function(pattern):
    """
    Computes the prefix (failure) array for KMP.

    :param pattern: the pattern string being preprocessed
    :type pattern: str
    :return: the prefix array, one entry per character of `pattern`
    """
    m = len(pattern)
    prefixes = [0] * (m + 1)
    i = 0
    for q in range(2, m + 1):
        while i > 0 and pattern[i] != pattern[q - 1]:
            i = prefixes[i]
        if pattern[i] == pattern[q - 1]:
            i += 1
        prefixes[q] = i
    return prefixes[1:]
7933cc33eba53247e858ae40b9691d101c7030e6
707,443
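A quick usage sketch for `compute_prefix_function` above, using the classic CLRS example pattern (the expected output follows from tracing the loop by hand):

    prefix = compute_prefix_function("ababaca")
    # prefix[k] is the length of the longest proper prefix of pattern[:k+1]
    # that is also a suffix of it
    assert prefix == [0, 0, 1, 2, 3, 0, 1]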
import math


def sigmoid(num):
    """
    Find the sigmoid of a number.

    :type num: number
    :param num: The number to find the sigmoid of
    :return: The result of the sigmoid
    :rtype: number

    >>> sigmoid(1)
    0.7310585786300049
    """
    # Return the calculated value
    return 1 / (1 + math.exp(-num))
73730a39627317011d5625ab85c146b6bd7793d8
707,444
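A usage sketch for `sigmoid` above; the first value mirrors the doctest, the second follows from exp(0) == 1:

    assert abs(sigmoid(1) - 0.7310585786300049) < 1e-12
    assert sigmoid(0) == 0.5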
def get_name_and_version(requirements_line: str) -> tuple[str, str]:
    """Get the name and version of a package from a line in the requirements file."""
    full_name, version = requirements_line.split(" ", 1)[0].split("==")
    name_without_extras = full_name.split("[", 1)[0]
    return name_without_extras, version
424b3c3138ba223610fdfa1cfa6d415b8e31aff3
707,445
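A usage sketch for `get_name_and_version` above, on a hypothetical pinned requirement that carries an extras marker and an environment marker:

    name, version = get_name_and_version("requests[security]==2.31.0 ; python_version >= '3.8'")
    assert (name, version) == ("requests", "2.31.0")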
import locale
import itertools


def validateTextFile(fileWithPath):
    """
    Test if a file is a plain text file and can be read.

    :param fileWithPath: (str) file path
    :return: the encoding that read the file successfully, or None
    """
    try:
        with open(fileWithPath, "r", encoding=locale.getpreferredencoding(),
                  errors="strict") as file:
            # Read only a couple of lines in the file
            for line in itertools.islice(file, 10):
                pass
        # Return the system's preferred encoding
        return locale.getpreferredencoding()
    except (UnicodeError, OSError):
        validencodings = ["utf-8", "ascii", "utf-16", "utf-32",
                          "iso-8859-1", "latin-1"]
        for currentEncoding in validencodings:
            try:
                with open(fileWithPath, "r", encoding=currentEncoding,
                          errors="strict") as file:
                    # Read only a couple of lines in the file
                    for line in itertools.islice(file, 10):
                        pass
                # Return the encoding that succeeded
                return currentEncoding
            except (UnicodeError, OSError):
                # Error occurred while reading the file, skip to next iteration
                continue
        # Error, no encoding was correct
        return None
22167a4501ca584061f1bddcc7738f00d4390085
707,446
from bs4 import BeautifulSoup


def get_title(filename="test.html"):
    """Read the specified file and load it into BeautifulSoup.
    Return the title tag.
    """
    with open(filename, "r") as my_file:
        file_string = my_file.read()
    file_soup = BeautifulSoup(file_string, 'html.parser')
    # find the title tags in the document
    title = file_soup.select("title")
    return title
31c35588bb10132509a0d35b49a9b7eeed902018
707,447
import re


def is_valid_dump_key(dump_key):
    """
    True if the `dump_key` is in the valid format of
    "database_name/timestamp.dump"
    """
    regexmatch = re.match(
        r'^[\w-]+/\d{4}_\d{2}_\d{2}_\d{2}_\d{2}_\d{2}_\d+\.\w+\.dump$',
        dump_key,
    )
    return bool(regexmatch)
66fd7d465f641a96bd8b22e95918a6dcbefef658
707,448
def fill_none(pre_made_replays_list):
    """Fill none and reformat some fields in a pre-made replays list.

    :param pre_made_replays_list: pre-made replays list from ballchasing.com.
    :return: formatted list.
    """
    for replay in pre_made_replays_list:
        if replay["region"] is None:
            replay["region"] = "North America"
            replay["phase"] = "Qualifier"
            replay["stage"] = "Tiebreaker"
            replay["round"] = "Finals"
        if replay['region'] == 'Main Event':
            replay['region'] = 'World'
        elif replay['region'] == 'Europe' and replay['phase'] == 'Tiebreaker':
            replay["phase"] = "Qualifier"
            replay["stage"] = "Tiebreaker"
            if replay["match"] == "EG vs 00":
                replay["round"] = "Lower Finals"
            else:
                replay["round"] = "Upper Finals"
    return pre_made_replays_list
ee900227a8afcba71e6a00ef475892da4fdc3e3b
707,449
def reverse_dict2(d):
    """Reverses direction of dependence dict

    >>> d = {'a': (1, 2), 'b': (2, 3), 'c': ()}
    >>> reverse_dict2(d)  # doctest: +SKIP
    {1: ('a',), 2: ('a', 'b'), 3: ('b',)}

    :note: dict order is not deterministic. As we iterate over the input
        dict, the output of this function depends on the dict order, so the
        output order should be considered nondeterministic.
    """
    result = {}
    for key in d:
        for val in d[key]:
            result[val] = result.get(val, tuple()) + (key,)
    return result
2419538a13699015f8fefa156e89cf9b1960e358
707,450
import random


def Flip(p, y='Y', n='N'):
    """Returns y with probability p; otherwise n."""
    return y if random.random() <= p else n
072e170e3f37508a04f8bdbed22470b178f05ab9
707,451
import configparser
import os


def parse_config2(filename=None):
    """
    https://docs.python.org/3.5/library/configparser.html

    :param filename: filename to parse config
    :return: config_parse result
    """
    _config = configparser.ConfigParser(allow_no_value=True)
    if filename:
        # ConfigParser does not create a file if it doesn't exist,
        # so create an empty one.
        if not os.path.isfile(filename):
            with open(filename, 'w', encoding='utf-8') as f:
                print('', file=f)
        with open(filename, encoding='utf-8') as f:
            _config.read_file(f)
    return _config
cf260f09e4c293915ab226b915aebed6cb98113f
707,452
def addneq_parse_residualline(line: str) -> dict:
    """
    Parse a line of daily-solution residuals from an ADDNEQ file.

    Extract the station name, the direction (N/E/U), the standard deviation,
    and then an arbitrary number of daily residuals. A series of lines can
    look like this:

    GESR              N    0.07   0.02  -0.06
    GESR              E    0.10  -0.00  -0.10
    GESR              U    0.23  -0.10   0.20
    """
    params = line.split()
    return {
        "STATION NAME": params[0],
        "DIRECTION": params[1],
        "STDDEV": float(params[2]),
        "RES": [float(x) for x in params[3:]],
    }
6d1556cbd01f3fe4cd66dcad231e41fa6b1b9470
707,453
def fahrenheit_to_celsius(fahrenheit):
    """Convert a Fahrenheit temperature to Celsius."""
    return (fahrenheit - 32.0) / 1.8
4aee3dd0b54450fabf7a3a01d340b45a89caeaa3
707,454
import random
import itertools


def sample_blocks(num_layers, num_approx):
    """Generate approx block permutations by sampling w/o replacement.
    Leave the first and last blocks as ReLU."""
    perms = []
    for _ in range(1000):
        perms.append(sorted(random.sample(list(range(0, num_layers)), num_approx)))
    # Remove duplicates
    perms.sort()
    return [p for p, _ in itertools.groupby(perms) if len(p) == num_approx]
b4b75e77b3749bc7766c709d86bf1f694898fc0d
707,455
import os


def expand(directory: str) -> str:
    """Apply expanduser and expandvars to directory to expand '~' and env vars."""
    temp1 = os.path.expanduser(directory)
    return os.path.expandvars(temp1)
ffad07715d5425211304e340c084c8f134bbcb22
707,456
import sys


def gather_ensemble_info(nmme_model):
    """Gathers ensemble information based on NMME model."""
    # Number of ensembles in the forecast (ens_num),
    # ensemble start index (ens_start), ensemble end index (ens_end).
    ensemble_info = {
        "CFSv2": (24, 1, 24),
        "GEOSv2": (10, 25, 34),
        "CCM4": (10, 35, 44),
        "GNEMO": (10, 45, 54),
        "CCSM4": (10, 55, 64),
        "GFDL": (30, 65, 94),
    }
    if nmme_model not in ensemble_info:
        print(f"[ERR] Invalid argument for nmme_model! Received {nmme_model}")
        sys.exit(1)
    ens_num, ens_start, ens_end = ensemble_info[nmme_model]
    return ens_num, ens_start, ens_end
56516751dc87415b6a08541eadb02b67a5bc6629
707,457
import random
import string


def _random_exptname():
    """Generate a random expt name NNNNNNNN_NNNNNN, where N is any digit 0..9."""
    r = ''.join(random.choice(string.digits) for _ in range(8))
    r = r + '_' + ''.join(random.choice(string.digits) for _ in range(6))
    return r
d9c72ed4bf742adf50e1fdad4f6acb1cc0046167
707,458
from typing import Union
from pathlib import Path
import subprocess


def is_tracked_upstream(folder: Union[str, Path]) -> bool:
    """
    Check if the current checked-out branch is tracked upstream.
    """
    try:
        command = "git rev-parse --symbolic-full-name --abbrev-ref @{u}"
        subprocess.run(
            command.split(),
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE,
            encoding="utf-8",
            check=True,
            cwd=folder,
        )
        return True
    except subprocess.CalledProcessError as exc:
        if "HEAD" in exc.stderr:
            raise OSError("No branch checked out") from exc
        return False
8c58efe0d0619aaa6517d656ba88f1e29653197a
707,459
def get_directions_id(destination):
    """Get place ID for directions, which is the place ID for the associated
    destination, if an event."""
    if hasattr(destination, 'destination'):
        if destination.destination:
            # event with a related destination; use it for directions
            return destination.destination.id
        else:
            # event without a destination
            return None
    else:
        # not an event
        return destination.id
f7cd182cb5ea344c341bf9bfaa7a4389335ae353
707,460
from typing import Any
from typing import cast


def parse_year(candidate: Any) -> int:
    """Parses the given candidate as a year literal.

    Raises a TypeError when the candidate is not a valid year."""
    if candidate is not None and not isinstance(candidate, int):
        raise TypeError("Argument year is expected to be an int, "
                        "but is {}".format(type(candidate)))
    return cast(int, candidate)
337cc3be16e1e1246d1d1f02b55665c655fe131f
707,461
def username_in_path(username, path_):
    """Checks if a username is contained in a URL path."""
    return username in path_
131a8fa102fd0a0f036da81030b005f92ea9aab0
707,463
def str_parse_as_utf8(content: bytes) -> str:
    """Returns the provided content decoded as utf-8."""
    return content.decode('utf-8')
75b8d5f1f8867c50b08146cc3edc1d0ab630280a
707,464
def remove_start(s: str) -> str:
    """
    Strip a leading '-' symbol from the string, if present.

    :param s: input string
    :return: the string without a leading '-'
    """
    return s[1:] if s.startswith('-') else s
03504a3094798f6582bcae40233f7215e8d4d780
707,466
import os


def list_dirs(path):
    """Walk the directories under a folder and return a list of their paths.

    :param path: folder to scan (created if it does not exist)
    :return: list of sub-directory paths
    """
    if not os.path.exists(path):
        os.mkdir(path)
    _path_list = []
    for lists in os.listdir(path):
        sub_path = os.path.join(path, lists)
        # If it is a directory
        if os.path.isdir(sub_path):
            _path_list.append(sub_path)
    return _path_list
b5a8e95e45adfbb1621762a9654aa237b0325d15
707,467
import os
import unittest


def get_tests():
    """Grab all of the tests to provide them to setup.py"""
    start_dir = os.path.dirname(__file__)
    return unittest.TestLoader().discover(start_dir, pattern='*.py')
68733f14ec7427705698c36ae411caaaff1c0e02
707,468
def definition():
    """
    Most recent student numbers and fees by set (i.e. by year, cost centre
    and set category), aggregated by fee, aos code, session and fee_category.
    """
    sql = """
    SELECT
        s.set_id,
        s.acad_year,
        s.costc,
        s.set_cat_id,
        fsc.description as set_cat_description,
        fs.fee_cat_id as fee_cat,
        cc.default_aos_code,
        n.aos_code,
        n.session,
        o.description as origin_description,
        o.origin_id,
        SUM(n.student_count) as student_count,
        a.fee_scheme_id,
        SUM(f.gross_fee - f.waiver) as individual_fee,
        SUM(n.student_count * (f.gross_fee - f.waiver)) as net_fee
    FROM s_number n
        INNER JOIN v_s_instance_mri i ON i.instance_Id = n.instance_id
        INNER JOIN f_set s ON s.set_id = i.set_id
        INNER JOIN fs_cost_centre cc ON cc.costc = s.costc
        INNER JOIN f_set_cat fsc ON fsc.set_cat_id = s.set_cat_id
        INNER JOIN s_fee_status fs ON fs.fee_status_id = n.fee_status_id
        INNER JOIN c_aos_code a ON a.aos_code = n.aos_code
        INNER JOIN s_fee f ON f.acad_year = s.acad_year
            AND f.fee_cat_id = fs.fee_cat_id
            AND f.fee_scheme_id = a.fee_scheme_id
            AND f.session = n.session
        INNER JOIN s_origin o ON o.origin_id = n.origin_id
    GROUP BY
        s.acad_year, s.costc, s.set_cat_id, fs.fee_cat_id, n.aos_code,
        n.session, a.fee_scheme_id, fsc.description, o.description,
        s.set_id, cc.default_aos_code, o.origin_id
    """
    return sql
18ded7340bf8786a531faf76c702e682bb44e0f3
707,469
def draw_adjacency_list():
    """Solution to exercise R-14.4.

    Draw an adjacency list representation of the undirected graph shown in
    Figure 14.1.

    ---------------------------------------------------------------------------
    Solution:
    ---------------------------------------------------------------------------
    I will re-use the edge labels from Exercise R-14.3:

    Snoeyink --- Goodrich       a
    Garg --- Goodrich           b
    Garg --- Tamassia           c
    Goldwasser --- Goodrich     d
    Goldwasser --- Tamassia     e
    Goodrich --- Tamassia       f
    Goodrich --- Vitter         g
    Goodrich --- Chiang         h
    Tamassia --- Tollis         i
    Tamassia --- Vitter         j
    Tamassia --- Preparata      k
    Tamassia --- Chiang         l
    Tollis --- Vitter           m
    Vitter --- Preparata        n
    Preparata --- Chiang        o

    The adjacency list V is a list of vertices v that each point to a
    collection I(v) that contains the incident edges of v.

    Snoeyink   --> {a}
    Garg       --> {b, c}
    Goldwasser --> {d, e}
    Goodrich   --> {a, b, d, f, g, h}
    Tamassia   --> {c, e, f, i, j, k, l}
    Vitter     --> {g, j, m, n}
    Chiang     --> {h, l, o}
    Tollis     --> {i, m}
    Preparata  --> {k, n, o}

    Note that each edge appears twice in the adjacency list, for a total of
    2*m = 2*15 = 30 edges.
    """
    return True
95c02ad974f2d964596cf770708ed11aa061ea49
707,470
def dict_to_one(dp_dict):
    """Input a dictionary, return a dictionary with all items set to one.

    Used to disable dropout, dropconnect layers and so on.

    Parameters
    ----------
    dp_dict : dictionary
        The dictionary contains key and number, e.g. keeping probabilities.

    Examples
    --------
    >>> dp_dict = dict_to_one( network.all_drop )
    >>> feed_dict.update(dp_dict)
    """
    return {x: 1 for x in dp_dict}
9d18b027a0458ca6e769a932f00705a32edcb3e7
707,471
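A minimal sketch of `dict_to_one` above, with a hand-written dict standing in for `network.all_drop`:

    keep_probs = {'drop1': 0.5, 'drop2': 0.8}
    assert dict_to_one(keep_probs) == {'drop1': 1, 'drop2': 1}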
def Dc(z, unit, cosmo):
    """
    Input:
        z: redshift
        unit: distance unit in kpc, Mpc, ...
        cosmo: cosmology object (e.g. an astropy cosmology) providing
            comoving_distance()
    Output:
        res: comoving distance in unit as defined by variable 'unit'
    """
    res = cosmo.comoving_distance(z).to_value(unit)  # *cosmo.h
    return res
02985b75bd24b2a18b07f7f3e158f3c6217fdf18
707,472
import subprocess


def check_conf(conf_filepath):
    """Wrap haproxy -c -f.

    Args:
        conf_filepath: Str, path to an haproxy configuration file.

    Returns:
        valid_config: Bool, true if configuration passed parsing.
        error_output: Str or None, parser output on failure.
    """
    try:
        subprocess.check_output(['haproxy', '-c', '-f', conf_filepath],
                                stderr=subprocess.STDOUT)
        valid_config = True
        error_output = None
    except subprocess.CalledProcessError as e:
        valid_config = False
        error_output = e.output
    return (valid_config, error_output)
ceb2f78bcd4a688eb17c88915ebfde3f5a142e49
707,474
def get_genome_dir(infra_id, genver=None, annver=None, key=None):
    """Return the genome directory name from infra_id and optional arguments."""
    dirname = f"{infra_id}"
    if genver is not None:
        dirname += f".gnm{genver}"
    if annver is not None:
        dirname += f".ann{annver}"
    if key is not None:
        dirname += f".{key}"
    return dirname
ab033772575ae30ae346f96aed840c48fb01c556
707,475
def uniq(string):
    """Removes duplicate words from a string, keeping only the first
    occurrence of each. The order of the words is not changed.
    """
    words = string.split()
    return ' '.join(sorted(set(words), key=words.index))
2e5b6c51bc90f3a2bd7a4c3e845f7ae330390a76
707,478
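A usage sketch for `uniq` above; later duplicates are dropped while first occurrences keep their order:

    assert uniq("a b a c b") == "a b c"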
def coo_index_to_data(index):
    """
    Converts a data index (row, col) to the 1-based, pixel-centered (x, y)
    coordinates of the center of the pixel.

    index: (int, int) or int
        (row, col) index of the pixel in the data table, or a single
        row or col index
    """
    return (index[1] + 1.0, index[0] + 1.0)
5cf3ee2cc4ea234aaeb2d9de97e92b41c5daf149
707,480
def generateListPermutations(elements, level=0):
    """Generate all possible permutations of the list 'elements'."""
    # print(" " * level, "gP(", elements, ")")
    if len(elements) == 0:
        return [[]]
    permutations = []
    for e in elements:
        reduced = elements[:]
        reduced.remove(e)
        reducedPermutations = generateListPermutations(reduced, level + 1)
        # print(" " * level, "reduced", reducedPermutations)
        for p in reducedPermutations:
            p.insert(0, e)
            permutations.append(p)
    return permutations
1894b6726bedaaf634e8c7ac56fc1abd9e204eef
707,481
import logging
import os
import sys


def get_logger(base_name, file_name=None):
    """
    Get a logger that writes logs to both stdout and a file. The default
    logging level is INFO, so remember to adjust the level on the returned
    logger if you need different verbosity.

    :param base_name: logger name; also the default log file name
    :param file_name: log file name, defaults to base_name
    :return: the configured logger
    """
    if file_name is None:
        file_name = base_name
    logger = logging.getLogger(base_name)
    logger.setLevel(logging.INFO)
    # create console handler & file handler
    ch = logging.StreamHandler(sys.stdout)
    fi = logging.FileHandler(filename=os.path.join("..", "logs", file_name + ".log"))
    # create formatter
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # add formatter to channels
    ch.setFormatter(formatter)
    fi.setFormatter(formatter)
    # add channels to logger
    logger.addHandler(ch)
    logger.addHandler(fi)
    return logger
6ac5b8d78610179118605c6918998f9314a23ec4
707,482
def _split(num):
    """Split num into a list of its digits (least significant first)."""
    # xxxx.xx => xxxxxx; round before truncating to int to avoid float error
    num = int(round(num * 100))
    result = []
    for i in range(16):
        tmp = num // 10 ** i
        if tmp == 0:
            return result
        result.append(tmp % 10)
    return result
575068b9b52fdff08522a75d8357db1d0ab86546
707,483
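A usage sketch for `_split` above (assuming the rounding fix noted in the comment); digits come out least significant first after scaling by 100:

    assert _split(12.34) == [4, 3, 2, 1]
    assert _split(0.05) == [5]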
def clean_up_tokenization_spaces(out_string):
    """Converts an output string (de-BPE-ed) using de-tokenization algorithm from OpenAI GPT."""
    out_string = out_string.replace('<unk>', '')
    out_string = (
        out_string.replace(' .', '.')
        .replace(' ?', '?')
        .replace(' !', '!')
        .replace(' ,', ',')
        .replace(" ' ", "'")
        .replace(" n't", "n't")
        .replace(" 'm", "'m")
        .replace(" do not", " don't")
        .replace(" 's", "'s")
        .replace(" 've", "'ve")
        .replace(" 're", "'re")
    )
    return out_string
0bd51ca7dbaa36569c0d2f18d510f1c6a92e1822
707,484
def _is_class(s):
    """Detects imports from a class/object, like `import DefaultJsonProtocol._`"""
    return s.startswith('import ') and len(s) > 7 and s[7].isupper()
deee946066b5b5fc548275dd2cce7ebc7023626d
707,486
def longest_substring_using_lists(s: str) -> int:
    """
    Find the longest substring without repeating characters.
    644 ms, 14.3 MB

    >>> longest_substring_using_lists("abac")
    3
    >>> longest_substring_using_lists("abcabcbb")
    3
    >>> longest_substring_using_lists("bbbbb")
    1
    >>> longest_substring_using_lists("pwwkew")
    3
    """
    words = list()
    longest = 0
    for char in s:  # for each character
        removals = []
        for word_idx in range(len(words)):  # check all found words for the char
            word = words[word_idx]
            if char in word:
                # if it exists then set its length to longest if it is the longest
                longest = max(longest, len(word))
                removals.append(word)
            else:
                # else add char to word
                words[word_idx] += char
        for remove in removals:
            words.remove(remove)
        # add char into words
        words.append(char)
    # guard against an empty string, where `words` is empty
    return max([longest, *(len(word) for word in words)])
4292af29c59ea6210cde28745f91f1e9573b7104
707,487
def pad_sents(sents, pad_token):
    """Pad a list of sentences (SMILES) according to the longest sentence in the batch.

    @param sents (list[list[str]]): list of SMILES, where each sentence is
        represented as a list of tokens
    @param pad_token (str): padding token
    @returns sents_padded (list[list[str]]): list of SMILES where SMILES
        shorter than the max length SMILES are padded out with the pad_token,
        such that each SMILES in the batch now has equal length.
    """
    max_length = max(len(sentence) for sentence in sents)
    sents_padded = [sentence + (max_length - len(sentence)) * [pad_token]
                    for sentence in sents]
    return sents_padded
8f0eabfaaa18eafa84366a2f20ed2ddd633dacc6
707,488
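A usage sketch for `pad_sents` above, with a toy SMILES batch and '_' as a hypothetical pad token:

    batch = [['C', 'C', 'O'], ['C']]
    assert pad_sents(batch, '_') == [['C', 'C', 'O'], ['C', '_', '_']]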
import typing


def residual_block(
    x,
    filters: int,
    weight_decay: float,
    *,
    strides: typing.Union[int, typing.Tuple[int, int]],
    dilation: typing.Union[int, typing.Tuple[int, int]],
    groups: int,
    base_width: int,
    downsample,
    use_basic_block: bool,
    use_cbam: bool,
    cbam_channel_reduction: int,
    activation: str,
    pre_activation: bool,
    small_input: bool,
    name: str,
):
    """
    Residual block.

    Design follows [2], where strides=2 is in the 3x3 convolution instead of
    the first 1x1 convolution for the bottleneck block. This increases Top1
    by ~0.5, with a slight performance drawback of ~5% images/sec.

    The last BN in each residual branch is zero-initialized following [3],
    so that the residual branch starts with zeros and each residual block
    behaves like an identity. This improves the model by 0.2~0.3%.

    - Attention Layers
        - CBAM: Convolutional Block Attention Module

    [1] Deep Residual Learning for Image Recognition
        https://arxiv.org/abs/1512.03385
    [2] resnet_50_v1_5_for_pytorch
        https://ngc.nvidia.com/catalog/model-scripts/nvidia
    [3] Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour
        https://arxiv.org/abs/1706.02677
    [4] Identity Mappings in Deep Residual Networks
        https://arxiv.org/abs/1603.05027
    """
    # Select the block builder directly instead of via eval(); `basic` and
    # `bottleneck` are assumed to be defined alongside this function.
    block_fn = basic if use_basic_block else bottleneck
    x = block_fn(
        x,
        filters,
        weight_decay,
        strides=strides,
        dilation=dilation,
        groups=groups,
        base_width=base_width,
        downsample=downsample,
        use_cbam=use_cbam,
        cbam_channel_reduction=cbam_channel_reduction,
        activation=activation,
        pre_activation=pre_activation,
        small_input=small_input,
        name=name,
    )
    return x
f2021a89e2d737e73bfef3fb7dc127c3bbb5d0b7
707,489
def byte_list_to_nbit_le_list(data, bitwidth, pad=0x00):
    """! @brief Convert a list of bytes to a list of n-bit integers (little endian)

    If the length of the data list is not a multiple of `bitwidth` // 8, then
    the pad value is used for the additional required bytes.

    @param data List of bytes.
    @param bitwidth Width in bits of the resulting values.
    @param pad Optional value used to pad input data if not aligned to the bitwidth.
    @result List of integer values that are `bitwidth` bits wide.
    """
    bytewidth = bitwidth // 8
    datalen = len(data) // bytewidth * bytewidth
    res = [sum((data[offset + i] << (i * 8)) for i in range(bytewidth))
           for offset in range(0, datalen, bytewidth)]
    remainder = len(data) % bytewidth
    if remainder != 0:
        pad_count = bytewidth - remainder
        padded_data = list(data[-remainder:]) + [pad] * pad_count
        res.append(sum((padded_data[i] << (i * 8)) for i in range(bytewidth)))
    return res
b92bbc28cc2ffd59ae9ca2e459842d7f4b284d18
707,490
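A usage sketch for `byte_list_to_nbit_le_list` above; the trailing odd byte is padded with the default 0x00:

    assert byte_list_to_nbit_le_list([0x12, 0x34, 0x56], 16) == [0x3412, 0x0056]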
import hashlib


def create_SHA_256_hash_of_file(file):
    """
    Function that returns the SHA 256 hash of 'file'.

    Logic taken from
    https://www.quickprogrammingtips.com/python/how-to-calculate-sha256-hash-of-a-file-in-python.html
    """
    sha256_hash = hashlib.sha256()
    with open(file, "rb") as f:
        # Read and update hash string value in blocks of 4K
        for byte_block in iter(lambda: f.read(4096), b""):
            sha256_hash.update(byte_block)
    # Converting to upper case because that's what is required by the policy
    # service. See their code:
    # https://dev.azure.com/msasg/Bing_and_IPG/_git/Aether?path=/src/aether/platform/backendV2/BlueBox/PolicyService/Microsoft.MachineLearning.PolicyService/Workers/CatalogValidation.cs
    return sha256_hash.hexdigest().upper()
14f62a49ea54f5fceb719c4df601fde165f5e55c
707,491
def partition_average(partition):
    """Given a partition, calculates the expected number of words sharing the
    same hint."""
    score = 0
    total = 0
    for hint in partition:
        score += len(partition[hint]) ** 2
        total += len(partition[hint])
    return score / total
944f514e925a86f3be431bd4d56970d92d16f570
707,492
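A worked sketch for `partition_average` above: with hint buckets of sizes 2 and 1, the expected bucket size is (2*2 + 1*1) / 3:

    partition = {'hint_a': ['word1', 'word2'], 'hint_b': ['word3']}
    assert abs(partition_average(partition) - 5 / 3) < 1e-12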
def passstore(config, name):
    """Get password file"""
    return config.passroot / name
d0ca8c71650bd98dacd7d6ff9ed061aba3f2c43a
707,493
def CheckTreeIsOpen(input_api, output_api, url, closed, url_text):
    """Similar to the one in presubmit_canned_checks except it shows a
    helpful status text instead.
    """
    assert input_api.is_committing
    try:
        connection = input_api.urllib2.urlopen(url)
        status = connection.read()
        connection.close()
        if input_api.re.match(closed, status):
            long_text = status + '\n' + url
            try:
                connection = input_api.urllib2.urlopen(url_text)
                text = connection.read()
                connection.close()
                match = input_api.re.search(
                    r"\<div class\=\"Notice\"\>(.*)\<\/div\>", text)
                if match:
                    long_text = match.group(1).strip()
            except IOError:
                pass
            return [output_api.PresubmitPromptWarning("The tree is closed.",
                                                      long_text=long_text)]
    except IOError:
        pass
    return []
540dd0ceb9c305907b0439b678a6444ca24c3f76
707,494
def is_catalogue_link(link):
    """Check whether the specified link points to a catalogue."""
    return link['type'] == 'application/atom+xml' and 'rel' not in link
bc6e2e7f5c34f6ea198036cf1404fef8f7e7b214
707,495
from typing import Sequence
from typing import Set


async def get_non_existent_ids(collection, id_list: Sequence[str]) -> Set[str]:
    """
    Return the IDs that are in `id_list`, but don't exist in the specified
    `collection`.

    :param collection: the database collection to check
    :param id_list: a list of document IDs to check for existence
    :return: a set of non-existent IDs
    """
    existing_group_ids = await collection.distinct("_id", {"_id": {"$in": id_list}})
    return set(id_list) - set(existing_group_ids)
b13c61f4528c36a9d78a3687ce84c39158399142
707,496
def _ParseProjectNameMatch(project_name):
    """Process the passed project name and determine the best representation.

    Args:
        project_name: a string with the project name matched in a regex

    Returns:
        A minimal representation of the project name, None if no valid content.
    """
    if not project_name:
        return None
    return project_name.lstrip().rstrip('#: \t\n')
cb9f92a26c7157a5125fbdb5dd8badd7ffd23055
707,497
def explore_validation_time_gap_threshold_segments(
        participant_list,
        time_gap_list=[100, 200, 300, 400, 500, 1000, 2000],
        prune_length=None,
        auto_partition_low_quality_segments=False):
    """Explores different threshold values for the invalid time gaps in the
    Segments for all Participants in the list.
    """
    seglen = 0
    segs = 0
    participants = []
    for p in participant_list:
        print("pid:", p.pid)
        if p.require_valid_segments:
            raise Exception("explore_validation_time_gap_threshold_segments "
                            "should be called with a list of Participants "
                            "with require_valid_segments = False")
        tvalidity = []
        for seg in p.segments:
            seglen += seg.completion_time
        segs += len(p.segments)
        for tresh in time_gap_list:  # time-gap
            invc = 0
            invsegs = []
            for seg in p.segments:
                if not seg.calc_validity2(tresh):
                    invc += 1
                    # collect invalid segments so they can be reported below
                    invsegs.append(seg)
            if len(invsegs) > 0:
                print("seg:", invsegs)
            tvalidity.append((tresh, invc))
        participants.append((p.pid, tvalidity, len(p.segments)))
        print((tvalidity, len(p.segments)))
    print("average seg len", seglen / float(segs))
    return participants
bd88f292a00986212ae36e383c4bb4e3cd94067c
707,498
def _get_lspci_name(line):
    """Reads and returns a 'name' from a line of `lspci` output."""
    hush = line.split('[')
    return '['.join(hush[0:-1]).strip()
92910d0f4d9dce1689ed22a963932fb85d8e2677
707,499
def get_child_right_position(position: int) -> int:
    """
    Heap helper function to get the position of the right child of the
    current node.

    >>> get_child_right_position(0)
    2
    """
    return (2 * position) + 2
2a5128a89ac35fe846d296d6b92c608e50b80a45
707,500
def get_label_parts(label):
    """Returns the parts of an absolute label as a list."""
    return label[2:].replace(":", "/").split("/")
44998aad262f04fdb4da9e7d96d2a2b3afb27502
707,502
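A usage sketch for `get_label_parts` above, on a Bazel-style absolute label (assuming the leading '//' convention the slicing implies):

    assert get_label_parts("//foo/bar:baz") == ["foo", "bar", "baz"]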
def split_range(r, n):
    """
    Computes the indices of segments after splitting a range of r values
    into n segments.

    Parameters
    ----------
    r : int
        Size of the range vector.
    n : int
        The number of splits.

    Returns
    -------
    segments : list
        The list of lists of first and last indices of segments.

    Example
    -------
    >>> split_range(8, 2)
    [[0, 4], [4, 8]]
    """
    step = int(r / n)
    segments = []
    for i in range(n):
        new_segment = [step * i, step * (i + 1)]
        segments.append(new_segment)
    # correct the gap in the missing index due to the truncated step
    segments[-1][-1] = r
    return segments
34f570933a5eb8772dc4b2e80936887280ff47a4
707,504
def _fill_three_digit_hex_color_code(*, hex_color_code: str) -> str:
    """
    Fill a 3-digit hexadecimal color code until it becomes 6 digits.

    Parameters
    ----------
    hex_color_code : str
        Three-digit hexadecimal color code (not including '#').
        e.g., 'aaa', 'fff'

    Returns
    -------
    filled_color_code : str
        Result color code. e.g., 'aaaaaa', 'ffffff'
    """
    filled_color_code: str = ''
    for char in hex_color_code:
        filled_color_code += char * 2
    return filled_color_code
d91df947fcc5f0718bbd9b3b4f69f1ad68ebeff4
707,505
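A usage sketch for `_fill_three_digit_hex_color_code` above; each digit is simply doubled:

    assert _fill_three_digit_hex_color_code(hex_color_code='a3f') == 'aa33ff'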
import ipaddress


def ipv4_addr_check():
    """Prompt user for IPv4 address, then validate. Re-prompt if invalid."""
    while True:
        try:
            return ipaddress.IPv4Address(input('Enter valid IPv4 address: '))
        except ValueError:
            # Keep looping so the user is re-prompted, as the docstring promises.
            print('Bad value, try again.')
e85681cdcedb605f47240b27e8e2bce077a39273
707,507
def namify(idx):
    """
    Helper function that pads a given file number and returns it as per the
    dataset image name format.
    """
    len_data = 6  # ILSVRC images are in the form of 000000.JPEG
    len_ = len(str(idx))
    need = len_data - len_
    assert len_data >= len_, "Error! Image idx being fetched is incorrect. Invalid value."
    pad = '0' * need
    return pad + str(idx)
069ff7a297f944e9e0e51e5e100276a54fa51618
707,508
def mock_environ():
    """Mock for `os.environ.copy`"""
    return {"SOME_ENV_VAR": "42"}
d68d44d793847f46354a8cf2503b654a40eed92a
707,509
def get_bedtools_coverage_cmd(bam_filename, gff_filename, output_filename,
                              require_paired=False):
    """
    Get bedtools command for getting the number of reads from the BAM filename
    that are strictly contained within each interval of the GFF.
    """
    args = {"bam_filename": bam_filename,
            "gff_filename": gff_filename}
    # Do not include strandedness flag since that doesn't handle
    # paired-end cases
    intersect_cmd = "bedtools intersect -abam %(bam_filename)s " \
                    "-b %(gff_filename)s -f 1 -ubam " % (args)
    coverage_cmd = "%s | bedtools coverage -abam - -b %s -counts > %s" \
                   % (intersect_cmd, gff_filename, output_filename)
    return coverage_cmd
e4d6da3e3e7fe611c3bc3023bea3a76a0003a1f2
707,510
def seek_inactive(x, start, length, direction=-1, abstol=0):
    """ Seek inactive region to the left of start

    Example
    -------
    >>> #         _______           |
    >>> seek_inactive([3, 2, 1, 1, 1, 2, 3, 4, 2], start=7, length=3)
    (1, slice(2, 4))

    When no sufficiently long sequence is found we return the end

    >>> #  _                        |
    >>> seek_inactive([3, 2, 1, 1, 1, 2, 3, 4, 2], start=7, length=5)
    (3, slice(0, 0))
    """
    end = -1 if direction == -1 else len(x)
    ind = start
    for i in range(start, end, direction):
        if abs(x[i] - x[ind]) > abstol:
            ind = i
        if abs(ind - i) >= length - 1:
            return x[ind], slice(ind, i, direction)

    if direction == 1:
        return x[-1], slice(-1, -1)
    else:
        return x[0], slice(0, 0)
a0029e0c145381b2acf57f77107d75d89c909b39
707,511
def get_duration_and_elevation(table):
    """Return a tuple of duration and elevation gain from an html table."""
    try:
        # av.note: want this to be numeric
        hiking_duration = str(table.contents[0].text.strip())
    except Exception:
        hiking_duration = ""
    try:
        # av.note: want this to be numeric
        elevation_gain_ft = str(
            table.contents[2]
            .text.strip()
            .replace("ft", "")
            .replace(",", "")
            .replace("with three different ascents", "")
            .replace("with multiple ascents", "")
            .replace("with two ascents", "")
            .replace("with two different ascents", "")
            .strip()
        )
    except Exception:
        elevation_gain_ft = ""
    return hiking_duration, elevation_gain_ft
d52ca3c6e5d75ff936e44b452b05790db931dc6e
707,512
import os
import time
import sys


def get_config():
    """Get config from env vars.

    Return:
        dict: Keys are: policy_url, policy_file_dir, dane_id, crypto_path,
            policy_name, app_uid, roles, trust_infile_path.
    """
    config = {}
    for x in ["policy_url", "policy_file_dir", "dane_id", "crypto_path",
              "policy_name", "app_uid", "roles", "trust_infile_path"]:
        config[x] = os.getenv(x.upper())
    for k, v in config.items():
        if v is None:
            print("Missing essential configuration: {}".format(k.upper()))
    if None in config.values():
        time.sleep(30)
        sys.exit(1)
    return config
a2c69da96e2c8ac39230e6d1b277de8951a91abe
707,513
import os


def unix_only(f):
    """Only execute on unix systems"""
    f.__test__ = os.name == "posix"
    return f
8f20070e75e0277341985c3d528311779aff47d1
707,514
def chunks(list_, num_items):
    """Break list_ into num_items-sized chunks."""
    results = []
    for i in range(0, len(list_), num_items):
        results.append(list_[i:i + num_items])
    return results
83da5c19c357cc996fc7585533303986bea83689
707,515
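A usage sketch for `chunks` above; the last chunk carries the remainder:

    assert chunks([1, 2, 3, 4, 5], 2) == [[1, 2], [3, 4], [5]]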
def form_requires_input(form):
    """
    Returns True if the form has at least one question that requires input
    """
    for question in form.get_questions([]):
        if question["tag"] not in ("trigger", "label", "hidden"):
            return True
    return False
97072a9edc494afa731312aebd1f23dc15bf9f47
707,516
def list_extract(items, arg):
    """Extract items from a list of containers

    Uses Django template lookup rules: tries list index / dict key lookup
    first, then tries getattr. If the result is callable, calls with no
    arguments and uses the return value.

    Usage:
    {{ list_of_lists|list_extract:1 }}
        (gets elt 1 from each item in list)
    {{ list_of_dicts|list_extract:'key' }}
        (gets value of 'key' from each dict in list)
    """
    def _extract(item):
        try:
            return item[arg]
        except (TypeError, KeyError):
            # fall through to attribute lookup, per the docstring
            pass
        attr = getattr(item, arg, None)
        return attr() if callable(attr) else attr

    return [_extract(item) for item in items]
23fb863a7032f37d029e8b8a86b883dbfb4d5e7b
707,517
def row2dict(cursor, row):
    """
    Convert a tuple-type row of data to a dict.

    @param cursor: cursor object
    @param row: row data (tuple)
    @return: row data (dict)
    @see: http://docs.python.jp/3.3/library/sqlite3.html
    """
    d = {}
    for idx, col in enumerate(cursor.description):
        d[col[0]] = row[idx]
    return d
60e0ebed21c35a65784fe94fe5781f61fbe0c97d
707,518
def merge(left, right):
    """Merge two sorted halves into one sorted list."""
    # print('inside Merge ')
    result = []
    leftIndex = 0
    rightIndex = 0
    while leftIndex < len(left) and rightIndex < len(right):
        if left[leftIndex] < right[rightIndex]:
            result.append(left[leftIndex])
            leftIndex += 1
        else:
            result.append(right[rightIndex])
            rightIndex += 1
    # print('merge', left, right)
    # print('result', result)
    # print('left elements ->', left[leftIndex:] + right[rightIndex:])
    # Append whatever elements were left over in either half
    return result + left[leftIndex:] + right[rightIndex:]
5b0012e102d72a93cf3ce47f9600b7dcef758a3b
707,519
import re


def parse_query(query):
    """Parse the given query, returning a tuple of string lists (include, exclude)."""
    exclude = re.compile(r'(?<=-")[^"]+?(?=")|(?<=-)\w+').findall(query)
    for w in sorted(exclude, key=lambda i: len(i), reverse=True):
        query = query.replace(w, '')
    query = " " + query
    return re.compile(r'(?<=[+ ]")[^"]+?(?=")|(?<=[+ ])\w+').findall(query), exclude
4fe6aac76935af6e5acaa3aedad40d6bc635d4ff
707,520
import struct


def read_plain_byte_array(file_obj, count):
    """Read `count` byte arrays using the plain encoding."""
    return [file_obj.read(struct.unpack(b"<i", file_obj.read(4))[0])
            for i in range(count)]
f300d205fda9b1b92ebd505f676b1f76122f994d
707,522
def big_number(int_in):
    """Converts a potentially big number into a readable string.

    Example:
    - big_number(10000000) returns '10 000 000'.
    """
    s = str(int_in)
    position = len(s)
    counter = 0
    out = ''
    while position != 0:
        counter += 1
        position -= 1
        out = s[position] + out
        if counter % 3 == 0 and position != 0:
            out = " " + out
    return out
7db0dce8ffa1cbea736537efbf2fdd4d8a87c20d
707,523
def test_pandigital_9(*args):
    """Test if args together contain the digits 1 through 9 uniquely."""
    digits = set()
    digit_count = 0
    for a in args:
        while a > 0:
            digits.add(a % 10)
            digit_count += 1
            a //= 10
    return digit_count == 9 and len(digits) == 9 and 0 not in digits
ad5a738400f7b8a9bea001a13a76798633b9ac61
707,525
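A usage sketch for `test_pandigital_9` above, using the well-known pandigital triple 192 / 384 / 576 (192 * 2 and 192 * 3):

    assert test_pandigital_9(192, 384, 576) is True
    assert test_pandigital_9(123, 456, 789, 1) is False  # ten digits, with a repeat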
def _get_all_scopes(blocks):
    """Get all block-local scopes from an IR."""
    all_scopes = []
    for label, block in blocks.items():
        if not (block.scope in all_scopes):
            all_scopes.append(block.scope)
    return all_scopes
daa13a20629dd419d08c9c6026972f666c3f9291
707,526
from datetime import datetime


def get_equinox_type(date):
    """Returns a string representing the type of equinox based on the month
    the equinox occurs in. It is assumed the date being passed has been
    confirmed to be an equinox.

    Keyword arguments:
    date -- a YYYY-MM-DD string.
    """
    month = datetime.strptime(date, '%Y-%m-%d').month
    if month == 3:
        return 'march'
    elif month == 9:
        return 'september'
    else:
        return None
06b65a54a0ccf681d9f9b57193f5e9d83578f0eb
707,527
def get_tally_sort_key(code, status):
    """
    Get a tally sort key

    The sort key can be used to sort candidates and other tabulation
    categories, for example the status and tally collections returned by
    rcv.Tabulation().tabulate().

    The sort codes will sort candidates before other tabulation categories;
    elected candidates before defeated candidates; elected candidates by
    increasing round of election, then by decreasing votes; defeated
    candidates by decreasing round of election, then by decreasing votes;
    any remaining ties are broken by the sort order of candidate names and
    labels for other tabulation categories.

    Arguments
    =========
    code
        A string representing a candidate name or label of another
        tabulation category.
    status
        A dictionary of tabulation result statuses, as given by the second
        item of the return value from rcv.Tabulation().tabulate().

    Returns
    =======
    A sort key in the form of a tuple of integers and/or strings.
    """
    if code in status:
        nbr_round = status[code].nbr_round
        votes = status[code].votes
        if status[code].status == 'elected':
            sort_key = (1, 1, nbr_round, -votes, code)
        else:
            sort_key = (1, 2, -nbr_round, -votes, code)
    else:
        sort_key = (2, code)
    # print('code =', code, ' sort_key =', sort_key)
    return sort_key
bd7d643300997903b84b1827174dd1f5ac515156
707,528
import difflib


def getStringSimilarity(string1: str, string2: str):
    """
    This function will return the similarity of two strings.
    """
    return difflib.SequenceMatcher(None, string1, string2).quick_ratio()
292f552449569206ee83ce862c2fb49f6063dc9e
707,530
import torch


def flipud(tensor):
    """
    Flips a given tensor along the first dimension (up to down)

    Parameters
    ----------
    tensor
        a tensor at least two-dimensional

    Returns
    -------
    Tensor
        the flipped tensor
    """
    return torch.flip(tensor, dims=[0])
b0fd62172b0055d9539b554a8c967c058e46b397
707,531
def get_file_type(filepath):
    """Returns the extension of a given filepath or url."""
    return filepath.split(".")[-1]
070a1b22508eef7ff6e6778498ba764c1858cccb
707,532
import os


def get_python_list(file_path):
    """
    Find all the .py files in the directory and append them to a list.

    :params: file_path = the path to the folder where the to-be-read files are.
    :returns: python_files : list of all files ending with '.py' in the folder.
    """
    python_files = []
    for file in os.listdir(file_path):
        if file.endswith(".py"):
            python_files.append(file)
    print('\nThese are all the .py files inside the folder: \n')
    for i in python_files:
        print(i)
    return python_files
b771814b86c5405d5810694ad58b7a8fe80b1885
707,533
from typing import List
from typing import Match
from typing import Optional
from typing import Pattern
import re


def _target_js_variable_is_used(
        *, var_name: str, exp_lines: List[str]) -> bool:
    """
    Get a boolean value for whether the target variable is used in the js
    expression or not.

    Parameters
    ----------
    var_name : str
        Target variable name.
    exp_lines : list of str
        js expression lines.

    Returns
    -------
    result : bool
        If the target variable is used in the js expression, True will be
        returned.
    """
    var_pattern: Pattern = re.compile(pattern=rf'var ({var_name}) = ')
    used_pattern_1: Pattern = re.compile(
        pattern=rf'{var_name}[ ;\)\.}},\]\[]')
    used_pattern_2: Pattern = re.compile(
        pattern=rf'{var_name}$')
    for line in exp_lines:
        if '//' in line:
            continue
        if var_name not in line:
            continue
        match: Optional[Match] = var_pattern.search(string=line)
        if match is not None:
            continue
        match = used_pattern_1.search(string=line)
        if match is not None:
            return True
        match = used_pattern_2.search(string=line)
        if match is not None:
            return True
    return False
be07cb1628676717b2a02723ae7c01a7ba7364d6
707,537
def zip_equalize_lists(a, b):
    """
    A zip implementation which will not stop when reaching the end of the
    smaller list, but will append None's to the smaller list to fill the gap.
    """
    a = list(a)
    b = list(b)
    a_len = len(a)
    b_len = len(b)
    diff = abs(a_len - b_len)
    if a_len < b_len:
        for _ in range(diff):
            a.append(None)
    if b_len < a_len:
        for _ in range(diff):
            b.append(None)
    return zip(a, b)
1cf5b9cadf4b75f6dab6c42578583585ea7abdfc
707,538
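A usage sketch for `zip_equalize_lists` above; the shorter list is padded with None:

    pairs = list(zip_equalize_lists([1, 2, 3], ['a']))
    assert pairs == [(1, 'a'), (2, None), (3, None)]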
def find_period(samples_second):
    """
    Find Period.

    Args:
        samples_second (int): number of samples per second

    Returns:
        float: samples per period divided by samples per second
    """
    samples_period = 4
    return samples_period / samples_second
c4a53e1d16be9e0724275034459639183d01eeb3
707,539
def sqrt(x: int) -> int:
    """
    Babylonian (integer) square root implementation
    """
    z = (x + 1) // 2
    y = x
    while z < y:
        y = z
        z = ((x // z) + z) // 2
    return y
1a91d35e5783a4984f2aca5a9b2a164296803317
707,540
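A usage sketch for the integer `sqrt` above; non-squares round down:

    assert sqrt(16) == 4
    assert sqrt(17) == 4
    assert sqrt(1) == 1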
def is_consecutive_list(list_of_integers):
    """
    ========================================================================
    IS CONSECUTIVE LIST

    PURPOSE
    -------
    Reports whether elements in a list increase in consecutive order.

    INPUT
    -----
    [[List]] [list_of_integers]
        - A list of integers.

    RETURN
    ------
    [BOOLEAN]
        - Returns True if the list is consecutive (each element is exactly
          one greater than the previous), otherwise False.
    ========================================================================
    """
    for i in range(1, len(list_of_integers)):
        if list_of_integers[i] - list_of_integers[i - 1] != 1:
            return False
    return True
3b165eb8d50cc9e0f3a13b6e4d47b7a8155736b9
707,541
def parent_id_name_and_quotes_for_table(sqltable):
    """
    Return a tuple with 2 items (name_of_field_of_parent_id, Boolean):
    True if the field's data type is string (and must be quoted),
    False otherwise.
    """
    id_name = None
    quotes = False
    # note: the original used iteritems(), which is Python 2 only
    for colname, sqlcol in sqltable.sql_columns.items():
        # root table
        if not sqltable.root.parent and \
                sqlcol.node == sqltable.root.get_id_node():
            id_name = colname
            if sqlcol.typo == "STRING":
                quotes = True
            break
        else:
            # nested table
            if sqlcol.node.reference:
                id_name = colname
                if sqlcol.typo == "STRING":
                    quotes = True
                break
    return (id_name, quotes)
6f3319dc6ae0ea70af5d2c9eda90fb1a9fb9daac
707,542
def clean_profit_data(profit_data):
    """Remove garbage settlement days where all equity values are zero."""
    for i in list(range(len(profit_data)))[::-1]:
        profit = profit_data[i][1] == 0
        closed = profit_data[i][2] == 0
        hold = profit_data[i][3] == 0
        if profit and closed and hold:
            profit_data.pop(i)
    return profit_data
d1b7fe9d747a1149f04747b1b3b1e6eba363c639
707,544
def mock_interface_settings_mismatch_protocol(mock_interface_settings,
                                              invalid_usb_device_protocol):
    """
    Fixture that yields mock USB interface settings with an unsupported
    device protocol.
    """
    mock_interface_settings.getProtocol.return_value = invalid_usb_device_protocol
    return mock_interface_settings
61958439a2869d29532e50868efb39fe3da6c8b5
707,545
def MakeLocalSsds(messages, ssd_configs):
    """Constructs the repeated local_ssd message objects."""
    if ssd_configs is None:
        return []

    local_ssds = []
    disk_msg = (
        messages.
        AllocationSpecificSKUAllocationAllocatedInstancePropertiesAllocatedDisk)
    interface_msg = disk_msg.InterfaceValueValuesEnum

    for s in ssd_configs:
        if s['interface'].upper() == 'NVME':
            interface = interface_msg.NVME
        else:
            interface = interface_msg.SCSI
        m = disk_msg(
            diskSizeGb=s['size'],
            interface=interface)
        local_ssds.append(m)
    return local_ssds
128e7a0358221fe3d93da4726924a7a783c65796
707,547
import base64


def _b64urldec(input: str) -> bytes:
    """
    Decode data from base64 urlsafe with stripped padding (as specified in
    the JWS RFC 7515).
    """
    # The input is stripped of padding '='. These are redundant when decoding
    # (only relevant for concatenated sequences of base64 encoded data) but
    # the decoder checks for them. Appending two (the maximum number) of
    # padding '=' is the easiest way to ensure it won't choke on too little
    # padding.
    return base64.urlsafe_b64decode(input + '==')
fb535072b560b8565916ae8ec3f32c61c41115d8
707,548
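A usage sketch for `_b64urldec` above; 'aGVsbG8' is 'hello' base64-encoded with its padding stripped:

    assert _b64urldec('aGVsbG8') == b'hello'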
import os


def is_morepath_template_auto_reload():
    """Returns True if auto reloading should be enabled."""
    auto_reload = os.environ.get("MOREPATH_TEMPLATE_AUTO_RELOAD", "")
    return auto_reload.lower() in {"1", "yes", "true", "on"}
72839bf2ab0a70cefe4627e294777533d8b0087f
707,549
def relabel_prometheus(job_config):
    """Get some prometheus configuration labels."""
    relabel = {
        'path': '__metrics_path__',
        'scheme': '__scheme__',
    }
    labels = {
        relabel[key]: value
        for key, value in job_config.items()
        if key in relabel.keys()
    }
    # parse __param_ parameters
    for param, value in job_config.get('params', {}).items():
        labels['__param_%s' % (param,)] = value
    return labels
eb08f617903fe66f462a5922f8149fd8861556ad
707,550
def checkGroup(self, group, colls):
    """
    Args:
        group: the collection of elements to look for
        colls: the collection to search in

    Returns:
        The elements of `group` found in `colls` if all of them are
        present, otherwise an empty list.
    """
    cut = []
    for elem in group:
        if elem in colls:
            cut.append(elem)
    if len(cut) == len(group):
        return cut
    else:
        return []
ca30648c536bcf26a1438d908f93a5d3dcc131c9
707,551
import sys


def _replace_sysarg(match):
    """Return the substitution for the $<n> syntax, e.g. $1 for the first
    command line parameter.
    """
    return sys.argv[int(match.group(1))]
efd338c537ecf2ef9113bc71d7970563ac9e5553
707,552
def decode(rdf, hint=[]):
    """Decode a ReDIF document."""
    def try_decode(encoding):
        rslt = rdf.decode(encoding)
        if rslt.lower().find("template-type") == -1:
            raise RuntimeError("Decoding Error")
        return rslt

    encodings = hint + ["windows-1252", "utf-8", "utf-16", "latin-1"]
    if rdf[:3] == b"\xef\xbb\xbf":
        encodings = ["utf-8-sig"] + encodings
    for enc in encodings:
        try:
            return try_decode(enc)
        except Exception:
            continue
    raise RuntimeError("Decoding Error")
f42eed2caaba90f4d22622643885b4d87b9df98b
707,553
def text(el):
    """
    Helper to get the text content of a BeautifulSoup item
    """
    return el.get_text().strip()
7b34c77c79677a73cc66532fe6305635b1bdac43
707,554
def get_sha512_manifest(zfile):
    """
    Get MANIFEST.MF from a bar file.

    :param zfile: Open (!!!) ZipFile instance.
    :type zfile: zipfile.ZipFile
    """
    names = zfile.namelist()
    manifest = None
    for name in names:
        if name.endswith("MANIFEST.MF"):
            manifest = name
            break
    if manifest is None:
        raise SystemExit("No MANIFEST.MF found in bar file")
    return manifest
7ef150bb3e89f8723649ee983085a413ec8a31df
707,555
def part1(data):
    """
    >>> part1(read_input())
    0
    """
    return data
1482c41b112a3e74775e71c4aabbd588de2b6553
707,556
import torch


def get_rectanguloid_mask(y, fat=1):
    """Get a rectanguloid mask of the data."""
    M = y.nonzero().max(0)[0].tolist()
    m = y.nonzero().min(0)[0].tolist()
    M = [min(M[i] + fat, y.shape[i] - 1) for i in range(3)]
    m = [max(v - fat, 0) for v in m]
    mask = torch.zeros_like(y)
    mask[m[0]: M[0], m[1]: M[1], m[2]: M[2]] = 1
    return mask
0ff3ab25f2ab109eb533c7e4fafd724718dbb986
707,557