Columns:
  content: string, length 39 to 14.9k
  sha1: string, length 40
  id: int64, range 0 to 710k
def augmentCrlStatus(msg):
    """Augment CRL message with status of ``COMPLETE`` or ``INCOMPLETE``.

    Check the CRL message for completeness. Will add a ``status`` field to
    the message with a value of ``COMPLETE`` or ``INCOMPLETE``.

    ``COMPLETE`` messages meet the following criteria:
    - Doesn't have an overflow (i.e. has more than 138 reports).
    - Has no reports.
    - All reports are in the database (including both text and graphics if indicated).

    Args:
        msg (dict): CRL message dictionary to be augmented.

    Returns:
        dict: ``msg`` dictionary with new ``status`` field indicating completeness.
    """
    reports = msg['reports']
    # A message with no reports is a complete report.
    if len(reports) == 0:
        msg['status'] = 'COMPLETE'
        return msg
    # An overflow CRL is incomplete.
    if ('overflow' in msg) and (msg['overflow'] == 1):
        msg['status'] = 'INCOMPLETE'
        return msg
    # Report numbers carry a '*' once stored in the database.
    for reportNumber in reports:
        if '*' not in reportNumber:
            msg['status'] = 'INCOMPLETE'
            return msg
    msg['status'] = 'COMPLETE'
    return msg
bd35db88259f2b4e1dad08a98973ef5413829542
21,666
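A quick usage sketch for augmentCrlStatus above; the message shape (a 'reports' list whose entries carry a '*' once stored, plus an optional 'overflow' flag) is inferred from the code rather than from any documented schema.

msg = {'reports': ['1234*', '5678'], 'overflow': 0}
print(augmentCrlStatus(msg)['status'])  # INCOMPLETE: '5678' lacks the '*' marker
print(augmentCrlStatus({'reports': ['1234*', '5678*']})['status'])  # COMPLETE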
def check_if_hits(row, column, fleet):
    """
    This method checks if the shot of the human player at the square
    represented by row and column hits any of the ships of fleet.
    :param row: int
    :param column: int
    :param fleet: list
    :returns result: bool - True if so and False otherwise
    """
    result = False
    # Each fleet entry is indexed as (row, col, is_horizontal, length, hits_set),
    # inferred from the indices used below.
    for i in range(len(fleet)):
        # If the guess is already in this ship's hits set, stop: it is not a new hit.
        if (row, column) in fleet[i][4]:
            break
        for j in range(fleet[i][3]):
            # if horizontal: same row, consecutive columns
            if fleet[i][2]:
                if row == fleet[i][0] and column == fleet[i][1] + j:
                    result = True
                    break
            # if vertical: consecutive rows, same column
            else:
                if row == fleet[i][0] + j and column == fleet[i][1]:
                    result = True
                    break
    return result
2f62285d68c190b63a0934698cc209a59f15d445
21,668
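A usage sketch for check_if_hits above, assuming each fleet entry is laid out as (row, col, is_horizontal, length, hits_set) as inferred from the indices in the code.

fleet = [(2, 4, True, 3, set())]  # one horizontal ship of length 3 starting at (2, 4)
print(check_if_hits(2, 5, fleet))  # True: (2, 5) lies on the ship
print(check_if_hits(3, 5, fleet))  # False: one row off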
def get_codon_dictionary(codon_dictionary, string):
    """
    Returns a dictionary with codon as key and its frequency as value
    """
    # Iterate in steps of 3; drop the remainder via: len(string) - len(string) % 3
    for i in range(0, len(string) - len(string) % 3, 3):
        codon = "{first}{second}{third}".format(first=string[i], second=string[i+1], third=string[i+2])
        if codon in codon_dictionary:
            codon_dictionary[codon] += 1
        else:
            codon_dictionary[codon] = 1
    return codon_dictionary
32301f29b580d04f967d3714fae762b680c6049e
21,672
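A minimal usage example for get_codon_dictionary above; trailing bases that do not fill a codon are dropped, as the range bound in the code shows.

counts = get_codon_dictionary({}, "ATGGCCATGTT")  # trailing "TT" is ignored
print(counts)  # {'ATG': 2, 'GCC': 1}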
def concat_fm(fm):
    """
    Concatenates directional feature maps as shown in the original paper.
    This function is used for visualization purposes only.
    :param fm: 12 ltrp feature maps
    :return: list of 4 concatenated feature maps
    """
    d1 = fm[0] + fm[1] + fm[2]
    d2 = fm[3] + fm[4] + fm[5]
    d3 = fm[6] + fm[7] + fm[8]
    d4 = fm[9] + fm[10] + fm[11]
    return [d1, d2, d3, d4]
bb6d6b1f9c6d0441be78e1b04d31e1eb24a6ce28
21,676
def _bound(color_component: float, minimum: float = 0, maximum: float = 255) -> float:
    """
    Bound the given color component value between the given min and max values.

    The minimum and maximum values will be included in the valid output.
    i.e. Given a color_component of 0 and a minimum of 10, the returned value will be 10.
    """
    color_component_out = max(color_component, minimum)
    return min(color_component_out, maximum)
15f335051d228069e7ca0be3c0768e99b009b3e6
21,677
def orange(text):
    """ Return this text formatted orange (olive) """
    return '\x0307%s\x03' % text
aff6e5495b8e9c8f8b763539c6d9308236e313fe
21,678
def position(table, _seat, postflop=False):
    """ Returns how many seats from the button the seat is. """
    # Raise an exception if the button is not set
    if postflop:
        seats = table.get_players(hascards=True)
    else:
        seats = table.get_players()
    return len(seats) - seats.index(_seat) - 1
253b7685a42db9c4b344f8492fdd9710c8f24585
21,681
def fact(number):
    """ Calculating the factorial of a number """
    result = 1
    for i in range(1, number + 1):
        result *= i
    return result
59f766f57ca71c487b640cd6d08841884657c8fd
21,692
import random


def one_run(wr: float, fr: float, ranks: dict, start_rank: int, end_rank: int, max_battles: int) -> int:
    """
    Simulate battles required to complete ranked season.
    :param wr: win rate
    :param fr: first place rate
    :param ranks: information on stars in each rank
    :param start_rank: initial rank for the simulation
    :param end_rank: final rank (simulation ends when reaching this rank)
    :param max_battles: maximum battles before simulation ends (prevents infinite loops).
    :return: number of battles
    """
    battles = 0
    stars = 0
    simulated_rank = start_rank
    while simulated_rank != end_rank:
        battles += 1
        if battles > max_battles:
            break
        # Determine battle outcome
        if random.random() < wr:
            stars += 1  # win
        elif random.random() >= fr:  # best player doesn't lose star
            stars -= 1  # loss and no star saved
        # Check if player moved up a rank
        if stars == ranks[simulated_rank]['stars']:
            simulated_rank -= 1  # move "up" a rank
            if ranks[simulated_rank]['free-star']:
                stars = 1  # get a free star for next rank
            else:
                stars = 0  # no free star
        # Check if a player moved down a rank
        if stars < 0:
            if ranks[simulated_rank]['irrevocable']:
                stars = 0
            else:
                simulated_rank += 1  # move "down" a rank
                stars = ranks[simulated_rank]['stars'] - 1  # 1 star away from next rank
    return battles
fb655a374696430d80668ae79423b7400cfc94c8
21,696
def _shift_parts(family_parts, subfamily_parts, stop_fn):
    """Iterate over subfamily parts, removing from subfamily and appending
    to family, until stop_fn(part) returns true. If subfamily_parts is
    empty, add 'Regular'. This works because for both original and wws
    subfamilies the order of parts is such that all parts that fail the
    stop_fn precede any that pass. Does not modify the input parts lists."""
    result_family_parts = family_parts[:]
    limit = len(subfamily_parts)
    i = 0
    while i < limit:
        part = subfamily_parts[i]
        if stop_fn(part):
            break
        result_family_parts.append(part)
        i += 1
    result_subfamily_parts = subfamily_parts[i:]
    if not result_subfamily_parts:
        result_subfamily_parts.append('Regular')
    return result_family_parts, result_subfamily_parts
29c45bcaf667e42d5edf1602302649fb58661359
21,701
from typing import Dict
import gzip


def read_fasta(filename: str) -> Dict[str, str]:
    """
    Reads a file containing multiple FASTA sequences and returns a dictionary of the header: sequence
    :param str filename: should be the name of the open file to be read
    :return: dict containing the header: the sequence
    """
    seq_dict = {}
    with gzip.open(filename, 'rt') as fc:
        all_lines = str(fc.read())
        seqs = all_lines.split('>')
        for seq in seqs[1:]:
            seq = seq.strip('"').split('\n')
            ref_id, prot_name = seq[0].replace(' [Homo sapiens]', '').split(' ', 1)
            if "NP_" in ref_id:
                seq_dict[ref_id] = ''.join(seq[1:])
    return seq_dict
2e14ec124ec0a9c3273fe8f6d3d23da0a7da9b2a
21,702
def char_replacement(list_smiles):
    """
    Replace the double characters into single character in a list of SMILES string.

    Parameters
    ----------
    list_smiles: list
        list of SMILES string describing a compound.

    Returns
    -------
    list
        list of SMILES with character replacement.
    """
    return [
        smile.replace("Cl", "L")
        .replace("Br", "R")
        .replace("Se", "E")
        .replace("Zn", "Z")
        .replace("Si", "T")
        .replace("@@", "$")
        for smile in list_smiles
    ]
b0a7e2a09cb966b826ee1cb9cf5afa734a2d2ed1
21,704
def _edge_match(edge_a_attr, edge_b_attr):
    """
    Compares attributes of the edges for equality.
    :param edge_a_attr: Attributes of first edge.
    :param edge_b_attr: Attributes of second edge.
    :return: True if equal - otherwise False
    """
    return edge_a_attr == edge_b_attr
0afa7dd6402f1c954753c27e0ab4244740eb5ffe
21,708
from typing import List


def _encode_strings(strings: List[str]) -> List[bytes]:
    """
    Encodes a list of strings as bytes

    Parameters
    ----------
    strings : list
        List of strings in any encoding

    Returns
    -------
    strings: list
        List of ASCII encoded bytes
    """
    return [s.encode("ascii", "ignore") for s in strings]
58e4aafa7aca4a65f1d62fde37023f7f1a638b33
21,710
def box_sizing(keyword):
    """Validation for the ``box-sizing`` property from css3-ui"""
    return keyword in ('padding-box', 'border-box', 'content-box')
42666cee49ba77d3089633a923872d2064e8f080
21,711
import random


def RANDBETWEEN(low, high):
    """
    Returns a uniformly random integer between two values, inclusive.
    """
    return random.randrange(low, high + 1)
6382801f41af2b05304b7fbf1c8d22b6f10b90a8
21,712
import re


def unindent(value):
    """Remove indentation from string."""
    return re.sub(r"\n +", "\n", value)
f455d1d7b24d708e73a0307af6ee333cfffe91f2
21,713
from typing import List
from typing import Tuple


def bono_tasa_fija(
        start_time: float,
        yf: float,
        num_cupones: int,
        valor_tasa: float) -> List[Tuple[float, float]]:
    """
    Returns the payment times and cash flows of a fixed-rate bullet bond with notional = 100.

    params:

    - start_time: date (expressed as a year fraction) at which the first coupon starts accruing.
    - yf: year fraction representing the bond's periodicity (yf = .5 -> semiannual bond).
    - num_cupones: number of coupons of the bond
    - valor_tasa: value of the bond's fixed rate. Interest accrues linearly.

    return:

    - A `list` of `tuple` with the coupon payment date (as an instant in time) and the coupon amount.
    """
    result = []
    nominal = 100.0
    flujo = nominal * valor_tasa * yf
    for i in range(1, num_cupones + 1):
        if i == num_cupones:
            flujo += nominal
        result.append((i * yf + start_time, flujo))
    return result
a3d792762458b7b30facf7ead74e2899e545425f
21,716
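A worked example for bono_tasa_fija above: two semiannual coupons at a 6% annual fixed rate with accrual starting at t = 0, against the 100.0 notional used in the code.

print(bono_tasa_fija(0.0, 0.5, 2, 0.06))
# [(0.5, 3.0), (1.0, 103.0)] -- the final coupon includes the notional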
from datetime import datetime


def find_todays_file(dir_list=None):
    """Identifies filename of most recent Sierra report"""
    fhdate = datetime.strftime(datetime.now(), '%Y%m%d')
    if dir_list is not None:
        # grab the latest file with date
        for fh in sorted(dir_list, reverse=True):
            if 'BookOpsQC.{}'.format(fhdate) in fh:
                return fh
    return None
789040c743375bc85c5ac2f8eaed4fec11c46158
21,718
def format_location(text):
    """Replace all the spaces with plus signs."""
    return text.replace(' ', '+')
e95002ae4b385e346628cf363f27b4db2475465d
21,721
def url_remove_user_pwd(url):
    """
    Given a URL, remove the username and password if any::

      print(url_remove_user_pwd("https://user:password@host:port/path"))
      https://host:port/path
    """
    _url = url.scheme + "://" + url.hostname
    if url.port:
        _url += ":%d" % url.port
    if url.path:
        _url += url.path
    return _url
0713a0973e6fac666198462145eab3d15179b6e0
21,728
def _remove_unwanted(args):
    """
    Lower cases tokens and removes numbers and possibly names.

    Parameters
    ----------
    args : list of tuples
        The following arguments zipped.

        text : list
            The text to clean.

        words_to_ignore : str or list
            Strings that should be removed from the text body.

        stop_words : str or list
            Stopwords for the given language.

    Returns
    -------
    text_words_removed : list
        The text without unwanted tokens.
    """
    text, words_to_ignore, stop_words = args
    return [
        token.lower()
        for token in text
        if token not in words_to_ignore and token not in stop_words
    ]
3031b24e4581adf3ed701c999e995e2779e48cf2
21,733
def merge_none(a, b):
    """
    Compare two sequences elementwise and merge them discarding None entries.

    Raises ValueError exception if the two sequences do not have the same
    length or if they have different non-None elements.

    Parameters
    ----------
    a, b : sequences
        The sequences to be compared.

    Example
    -------
    >>> merge_none([1,None,3],[None,2,3])
    (1, 2, 3)
    """
    if a is b is None:
        return None
    if len(a) != len(b):
        raise ValueError('The input sequences do not have the same length.')
    if any(p != q for p, q in zip(a, b) if None not in (p, q)):
        raise ValueError('The input sequences have incompatible values.')
    return tuple(p if p is not None else q for p, q in zip(a, b))
e25747cb2c8aeaa647ed9f3b19fbf311a7b0b701
21,738
def _identifier_split(identifier):
    """Return (name, start, end, strand) tuple from an identifier (PRIVATE)."""
    id, loc, strand = identifier.split(":")
    start, end = map(int, loc.split("-"))
    start -= 1
    return id, start, end, strand
9fa8f1850fe628b7a26c5813b6d738e72c0d7ae5
21,741
import re
import time
import requests


def check(url, regexp=None):
    """
    Make a get request to a given url and return some metrics about the request.
    If the regexp parameter is present, check if this regular expression is present
    within the page returned by the request.

    :param url: url to be checked. if it does not start with http:// or https://,
        it will be prefixed with http://
    :param regexp: (compiled) regular expression, or None

    :return: if connection is successful, it returns a dictionary with the following keys:
        'timestamp': of the check,
        'url': the actual url checked (which may have been prefixed with 'http://'),
        'response_time': in seconds, see below note
        'status_code': from the response, as string
        'matched': True/False if the regular expression was matched within the page returned by the url

        if connection is unsuccessful, it returns a dictionary with the following keys:
        'timestamp': of the check,
        'url': the actual url checked (which may have been prefixed with 'http://'),
        'error_msg': the message explaining the connection failure

    Note that as the HTTP response time, the "elapsed" time provided by the request
    library is used, that is, the amount of time elapsed between sending the request
    and the arrival of the response (as a timedelta). This property specifically
    measures the time taken between sending the first byte of the request and
    finishing parsing the headers. It is therefore unaffected by consuming the
    response content or the value of the stream keyword argument.
    See also https://2.python-requests.org/en/master/api/#requests.Response.elapsed
    """
    if isinstance(regexp, str):
        regexp = re.compile(regexp)
    if not (url.startswith('http://') or url.startswith('https://')):
        url = 'http://' + url
    timestamp = time.time()
    metrics = {
        'timestamp': timestamp,
        'url': url,
    }
    try:
        resp = requests.get(url)
        metrics['response_time'] = resp.elapsed.total_seconds()
        metrics['status_code'] = str(resp.status_code)
        if regexp:
            metrics['matched'] = bool(regexp.search(resp.text))
    except requests.exceptions.RequestException:
        # we catch with this all exceptions explicitly raised from requests
        metrics['error_msg'] = "connection error"
    return metrics
879069e2763e3be793e77bdcc540005adcec1435
21,750
def partition(arr, low, high):
    """
    Partition is a helper function for the quicksort. It takes a pivot and
    places lower values to the left and higher values to the right
    """
    i = low - 1
    pivot = arr[high]
    for j in range(low, high):
        if arr[j] <= pivot:
            i = i + 1
            arr[i], arr[j] = arr[j], arr[i]
    arr[i + 1], arr[high] = arr[high], arr[i + 1]
    return i + 1
1bacd1c407542087bd161253e129f4fff098f05d
21,762
def deep_update_dict(origin_dict, override_dict):
    """ update origin dict with override dict recursively
    e.g. origin_dict = {'a': 1, 'b': {'c': 2, 'd': 4}}
         override_dict = {'b': {'c': 3}}
         return: {'a': 1, 'b': {'c': 3, 'd': 4}}
    """
    for key, val in override_dict.items():
        if isinstance(val, dict):
            tmp = deep_update_dict(origin_dict.get(key, {}), val)
            origin_dict[key] = tmp
        else:
            origin_dict[key] = override_dict[key]
    return origin_dict
09c0f8bb1656aef387a4a8a96f756a520d1dc23d
21,766
def get_coordinates_from_kml(coordinates):
    """Returns list of tuples of coordinates.

    Args:
        coordinates: coordinates element from KML.
    """
    if coordinates:
        return [tuple(float(x.strip()) for x in c.split(','))
                for c in str(coordinates[0]).split(' ') if c.strip()]
921741e0e157a7d635f59cf75800fb32fcdd4ba2
21,769
def isArray(v, N=None):
    """Check if v is an array or a vector, with optional size.

    Examples
    --------
    >>> import pygimli as pg
    >>> print(pg.isArray([0, 1]))
    True
    >>> print(pg.isArray(np.array(5)))
    True
    >>> print(pg.isArray(pg.Vector(5)))
    True
    >>> print(pg.isArray(pg.Vector(5), N=5))
    True
    >>> print(pg.isArray(pg.Vector(5), N=2))
    False
    >>> print(pg.isArray('foo'))
    False
    """
    if N is None:
        return hasattr(v, '__iter__') and not isinstance(v, str)
    return isArray(v) and len(v) == N
32b05b6810a9cfc2d97fbfcfdbdc2da0c1b47104
21,770
def filter_collections_exist(hsp, collection_names):
    """
    Filters a list of collections to return only those that do exist
    """
    filtered = []
    for entry in hsp.collections.apiget('list'):
        if entry['name'] in collection_names:
            filtered.append(entry['name'])
    return filtered
54122ca77d6cb4acd5bd1492d1dcd15d6406c1a7
21,773
def anagram_solution_1(s1, s2):
    """
    Solution 1: checking off.
    Check whether every character of the first string appears in the second
    string; if each character can be checked off, the two strings must be
    anagrams of each other.
    A character can be marked as checked by replacing it with None. However,
    since Python strings are immutable, the first step is to convert the
    second string to a list.
    For each character of the first string, look for it in the second list
    and, if found, replace it with None.
    T = O(n^2)
    """
    if len(s1) != len(s2):
        return False
    alist = list(s2)
    pos1 = 0
    still_ok = True
    while pos1 < len(s1) and still_ok:
        pos2 = 0
        found = False
        while pos2 < len(alist) and not found:
            if s1[pos1] == alist[pos2]:
                found = True
            else:
                pos2 = pos2 + 1
        if found:
            alist[pos2] = None
        else:
            still_ok = False
        pos1 = pos1 + 1
    return still_ok
e45ba0e0607c57a9b21ca1dd59a86b98dcac6f89
21,776
def has_argument(command: str) -> bool:
    """
    Check if command has an argument.

    This is a helper function for process_command.

    :param command: an alphabetic string
    :precondition: command must be an alphabetic string and part of the list returned by get_command_list
    :postcondition: returns True if command has an argument, else False
    :return: True if command has an argument, otherwise False

    >>> has_argument("h")
    False
    >>> has_argument("b")
    True
    """
    commands_dictionary = {
        "q": False,
        "h": False,
        "b": True,
        "s": True,
        "i": True,
        "c": True
    }
    return commands_dictionary[command]
50bc475fa910ab0637f19bb07819fcc4fb78e325
21,780
from datetime import datetime


def convert_date(timestamp):
    """Converts API timestamp to publication-ready dateline"""
    day = timestamp[5:7]
    month = datetime.strptime(timestamp[8:11], '%b').strftime('%B')
    year = timestamp[12:16]
    date = month + ' ' + day + ", " + year
    return date
661e91a9c037d65db7ea9621bb47e0230a833c31
21,782
def _check_before_comment(commented_map, key_to_check, first=False):
    """Check if commented_map has a comment before key_to_check or not.

    All our default comments are before a key, so we just check for that.

    :param commented_map:
    :type commented_map: ruamel.yaml.comments.CommentedMap
    :param key_to_check:
    :type key_to_check: str
    :param first: True if the key is the first key in the yaml file, as that
        comment is associated with the file and not with the key.
    :type first: bool
    :return: True if there is a comment before a key.
    :rtype: bool
    """
    if first:
        # In the first case, the comment is associated to the CommentedMap, and not to the key.
        comments_list = commented_map.ca.comment
        if not comments_list:
            return False
        # This is the comment structure in ruamel. They don't have any good method for us to check.
        return len(comments_list) > 1 and comments_list[1] and len(comments_list[1]) > 0
    else:
        comments_dict = commented_map.ca.items
        if not comments_dict:
            return False
        if key_to_check in comments_dict:
            comments_list = comments_dict[key_to_check]
            # This is the comment structure in ruamel. They don't have any good method for us to check.
            if len(comments_list) > 1 and comments_list[1] and len(comments_list[1]) > 0:
                return True
            else:
                return False
        else:
            # No key exists, so no comments.
            return False
92997028120026e3ae604704510c082f9b201543
21,783
import re


def is_comment(line):
    """Determines if a string is entirely a fortran comment."""
    return bool(re.match(r'\A\s*!', line))
942bf9b780f7c890c75a18aac0a4380d96825c04
21,789
def header(table):
    """
    Return the header row for the given table. E.g.::

        >>> from petl import header
        >>> table = [['foo', 'bar'], ['a', 1], ['b', 2]]
        >>> header(table)
        ['foo', 'bar']

    See also :func:`fieldnames`.
    """
    it = iter(table)
    return next(it)
c4e772b40cdda2ffcbaf383dc4787f2e146a97e1
21,791
import six
import base64


def is_base64(s):
    """
    is_base64 tests whether a string is valid base64 by testing that it
    round-trips accurately. This is required because python 2.7 does not
    have a Validate option to the decoder.
    """
    try:
        s = six.ensure_binary(s, "utf-8")
        return base64.b64encode(base64.b64decode(s)) == s
    except Exception:
        return False
8c869fd96217e70dd896d8309719d3b0c754c388
21,793
def _n2(a, b):
    """Return (a - b).evalf(2) if a and b are comparable, else None.
    This should only be used when a and b are already sympified.
    """
    # /!\ it is very important (see issue 8245) not to
    # use a re-evaluated number in the calculation of dif
    if a.is_comparable and b.is_comparable:
        dif = (a - b).evalf(2)
        if dif.is_comparable:
            return dif
bb1c3ebcea8af5ddf248bd90ef9810093840bbcf
21,795
def create_non_dupe(base_name: str, opt_num: int, comparison) -> str:
    """Makes sure base_name is not in comparison, and if it is it's renamed.

    :param base_name: Name to check/make unique.
    :param opt_num: Number of the option base_name belongs to, used in making it unique.
    :param comparison: Dictionary or set to search for base_name in.
    :return: Unique name.
    """
    h = base_name
    if h in comparison:
        n = 0
        h = h + '_O' + str(opt_num)
        h_end = len(h)
        while h in comparison:
            h = h[:h_end] + '_' + str(n)
            n += 1
    return h
cb12092838d6e0482f28b7b00682a3390fcac790
21,797
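A usage sketch for create_non_dupe above, showing how the '_O<opt_num>' and numeric suffixes accumulate:

taken = {'speed', 'speed_O2', 'speed_O2_0'}
print(create_non_dupe('speed', 2, taken))  # speed_O2_1
print(create_non_dupe('turbo', 2, taken))  # turbo (already unique)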
def get_min_value_from_matrix(matrix_filename):
    """ Returns the minimum value of a matrix file

    :param matrix_filename: str
    :rtype: float
    """
    matrix = []
    with open(matrix_filename) as file:
        for line in file:
            matrix.extend([float(val) for val in line.rstrip().split()])
    return min(matrix)
69413010834b4e4fb903e164e41677619ac88bb3
21,799
def interpret_instruction(instruction, parameter):
    """
    Interprets an instruction and returns offset to next command and accumulator value.

    :param instruction: acc, jmp or nop
    :param parameter: signed integer
    :return: (jump_offset, accumulator_offset)
    """
    if instruction == 'acc':
        return 1, parameter
    elif instruction == 'jmp':
        return parameter, 0
    else:
        return 1, 0
4f1b0ba7b1d92256e299a3f2dab9eb6c42a5314b
21,800
import re


def _contains_expression(repr_str: str) -> bool:
    """
    Checks whether or not a `repr_str` contains an expression.
    (Single unary expressions are excluded)
    """
    repr_str = re.sub(r"\s+", "", repr_str)
    repr_str = repr_str.replace("(", "")
    repr_str = repr_str.replace(")", "")
    symbols = re.findall(r"[\w']+", repr_str)
    non_symbols = re.findall(r"[^\w']+", repr_str)
    if len(non_symbols) == 0:
        return False
    if len(non_symbols) == 1 and len(symbols) == 1:
        return False
    return True
af758c8874d22ea1cfa25bb73c7605ed5e4d2d75
21,801
def issafe(arg):
    """Returns False if arg contains ';' or '|'."""
    return arg.find(';') == -1 and arg.find('|') == -1
f6746d5290e21eb84d7343792d277bce4c1871ff
21,804
def _bisect_blocks(web3, timestamp, use_left_bound=True):
    """
    Perform a binary search on the blockchain for the block that matches the
    given timestamp. The `use_left_bound` parameter determines whether to
    return the block to the left or right of the timestamp in the event that
    no block matches the timestamp exactly.
    """
    left_bound = 1
    right_bound = web3.eth.blockNumber
    left_block = web3.eth.getBlock(left_bound)
    if left_block['timestamp'] >= timestamp:
        return 'earliest'
    right_block = web3.eth.getBlock(right_bound)
    if right_block['timestamp'] <= timestamp:
        return 'latest'
    while left_bound < right_bound - 1:
        middle = (left_bound + right_bound) // 2
        middle_block = web3.eth.getBlock(middle)
        if middle_block['timestamp'] < timestamp:
            left_bound = middle
        elif middle_block['timestamp'] > timestamp:
            right_bound = middle
        else:
            return middle
    else:
        if use_left_bound:
            return left_bound
        else:
            return right_bound
9eb011ca488b7262e78efd29fe11f3c0136a5933
21,809
def chunks(l, n):
    """ Successive n-sized chunks from l. """
    res = []
    for i in range(0, len(l), n):
        assert len(l[i:i + n]) == n
        res += [l[i:i + n]]
    return res
148467d681e545487ea1a52c3b4d548726c77f6c
21,815
def update_max_speed(driver, speed):
    """
    Updates the max speed of the driver

    :param driver: driver
    :param speed: new max speed
    :type driver: DriverProfile
    :return: updated driver profile
    """
    return driver.update_max_speed(speed)
73987703b584099538d865dde9d7d25f48080283
21,817
import torch


def evaluate_nll(confidences: torch.Tensor, true_labels: torch.Tensor) -> float:
    """
    Args:
        confidences (Tensor): a tensor of shape [N, K] of predicted confidences.
        true_labels (Tensor): a tensor of shape [N,] of ground truth labels.

    Returns:
        nll (float): average negative-log-likelihood of predictions.
    """
    nll = torch.nn.functional.nll_loss(
        torch.log(1e-12 + confidences), true_labels
    ).item()
    return nll
d1c5aa5d69e788ee835d8b94b6dd9d6895656e53
21,831
def train_test_column_split(x, y, df_column):
    """Function for splitting dataset into train/test partitions w.r.t. a column (pd.Series).

    Args:
        x (pd.DataFrame): DataFrame containing predictors.
        y (pd.DataFrame): DataFrame containing target variable.
        df_column (pd.Series): Series for train/test split, assuming it is contained in x.

    Returns:
        tuple: (x_train, x_test, y_train, y_test).
            A tuple of partitions of the initial dataset.
    """
    x1, y1, col_name = x.copy(), y.copy(), df_column.name
    y1[col_name] = df_column
    return (x1[x1[col_name] == 'train'].drop(col_name, axis=1),
            x1[x1[col_name] == 'test'].drop(col_name, axis=1),
            y1[y1[col_name] == 'train'].drop(col_name, axis=1),
            y1[y1[col_name] == 'test'].drop(col_name, axis=1))
a28b536be57e04870ae9e6f1e1abc854838e24ae
21,833
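A usage sketch for train_test_column_split above; the split column lives in x (as the docstring assumes) and holds the literal labels 'train' and 'test'.

import pandas as pd

x = pd.DataFrame({'f1': [1, 2, 3, 4], 'split': ['train', 'train', 'test', 'train']})
y = pd.DataFrame({'target': [0, 1, 0, 1]})
x_train, x_test, y_train, y_test = train_test_column_split(x, y, x['split'])
print(len(x_train), len(x_test))  # 3 1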
def triplo(n=0):
    """
    -> Calculates the triple of a number
    :param n: number
    :return: triple of the number
    """
    return n * 3
e612051944215b2491958005cadaac9eff5ba3b6
21,835
def calculateXVertice(termA, termB):
    """
    Calculates the value of the vertice X.
    """
    vertice = (-termB) / (2 * termA)
    return vertice
aafeb34baad2c5361ad6f1c49088984708cdffee
21,838
def _search(left, right, predicate):
    """Simple binary search that uses the ``predicate`` function to determine direction of search"""
    if right >= left:
        mid = left + (right - left) // 2
        res = predicate(mid)
        if res == 0:
            return mid
        elif res > 0:  # positive result: the target lies in the left half
            return _search(left, mid - 1, predicate)
        else:
            return _search(mid + 1, right, predicate)
    else:
        return -1
80a8ae96468fa4a13e42dc5b45920123c1eb8833
21,841
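A usage sketch for _search above, assuming a comparator-style predicate (0 = found, positive = search left, negative = search right), which is the convention the branch above follows:

data = [2, 5, 8, 12, 16]
target = 12
print(_search(0, len(data) - 1, lambda i: data[i] - target))  # 3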
def getPadding(sField, cchWidth):
    """ Returns space padding for the given field string. """
    if len(sField) < cchWidth:
        return ' ' * (cchWidth - len(sField))
    return ''
895d3cde1daf0045cb434cc2fcdef6c9ca8808ff
21,843
def get_data_file_path_list(data_file_list_path):
    """Get mapping of video id to sensor data files.

    video id to (original video id, [file 1, ..., file n])
    where file 1, ..., file n are one series data.

    Note.
    - Original video ids in input files sometimes mix lower and upper case.
    - To handle both, the lower-cased id is used as the mapping key.
    """
    mapping = {}
    with open(data_file_list_path) as f_in:
        for line in f_in.readlines():
            if line.strip() == '':
                continue
            video_name_prefix, files_txt = line.strip().split('\t')
            mapping[video_name_prefix.lower()] = (
                video_name_prefix, files_txt.split(','))
    return mapping
7655c27105b1ce051bf1942655daabc2dfce9bd0
21,851
def snake_case_to_pascal_case(input_string):
    """
    Converts the input string from snake_case to PascalCase

    :param input_string: (str) a snake_case string
    :return: (str) a PascalCase string
    """
    input_list = input_string.split('_')
    input_list = [i.capitalize() for i in input_list]
    output = ''.join(input_list)
    return output
6c6344fb052dc6c1a712b838d58266ce8ea9b5c0
21,854
import socket


def check_port(ip, port, timeout=None):
    """
    Checks if the port is open on a specific IP
    @param ip: IP of the remote host
    @param port: The port to check
    @param timeout: Timeout, in seconds
    @return bool: True if the port is open, False if closed
    """
    socket_port = socket.socket()
    if timeout is not None:
        socket_port.settimeout(timeout)
    try:
        socket_port.connect((ip, int(port)))
    except socket.error:
        return False
    else:
        socket_port.close()
        return True
43a4696ca002f96e9b6c28d67326dd4c0c285e5e
21,860
def make_collision_handler(collider, maplayer):
    """Returns ``f = collider.collide_map(maplayer, ...)``

    Returns:
        f : ``(last, new, vx, vy)`` -> ``(vx, vy)``

    Utility function to create a collision handler by combining the given
    collider and map layer.

    Arguments:
        maplayer : tells the objects to collide with.
        collider : tells how velocity changes on collision and resolves
            actual collisions.
    """
    def collision_handler(last, new, vx, vy):
        return collider.collide_map(maplayer, last, new, vx, vy)
    return collision_handler
df59b7eb4b74fe803f1b13fcac7dabc29e06a62b
21,862
def conv_out_shape(in_shape, layers):
    """
    Calculates output shape of input_shape going through a list of pytorch convolutional layers

    in_shape: (H, W)
    layers: list of convolution layers
    """
    shape = in_shape
    for layer in layers:
        h_out = ((shape[0] + 2 * layer.padding[0] - layer.dilation[0] * (layer.kernel_size[0] - 1) - 1) / layer.stride[0]) + 1
        w_out = ((shape[1] + 2 * layer.padding[1] - layer.dilation[1] * (layer.kernel_size[1] - 1) - 1) / layer.stride[1]) + 1
        shape = (int(h_out), int(w_out))
    return shape
b75fb479f47304be03aef20a36583ad8a2edc0de
21,870
import collections
import json


def load_json(files):
    """Load all json files as a list of dictionaries"""
    config = []
    for file in files:
        with open(file, 'r') as data_file:
            config.append(collections.OrderedDict(json.load(data_file)))
    return config
63174b3fd1ce208a347c6cf7b4904873a97e7136
21,871
from datetime import datetime


def print_start_time(message: str) -> datetime:
    """Print start time.

    Args:
        message (str): Message to print.

    Returns:
        start (datetime): Start time.
    """
    start = datetime.now()
    print(f'{message} {start}')
    return start
a8f7f07da5c72bea88cf8e48a8d3f651a7662e0e
21,875
def ispalindrome(s):
    """
    Returns true if s is a palindrome

    There are two ways to define a palindrome:
    1. s is a palindrome if it reads the same backward and forward.
    2. s is a palindrome if either
       (1) its length is <= 1 OR
       (2) its first and last chars are the same and the string between them is a palindrome.

    Letters must match exactly.

    Parameter s: the candidate palindrome
    Precondition s is a string
    """
    assert type(s) == str, repr(s) + ' is not a string'  # get in the habit
    # Base case
    if len(s) < 2:
        return True
    # s has at least 2 characters
    ends = s[0] == s[-1]
    middle = ispalindrome(s[1:-1])
    # Both must be true to be a palindrome
    return ends and middle
5ad5aaef785be935ab6494cf0a1653a265b680ab
21,887
def prefer_insertions_at_309_and_315(mb):
    """Prefer alternatives that include 309.1C or 315.1C over others.

    There are two multi-C runs at the beginning of the 300's and by
    convention, any insert in one of those runs is pushed to the end.
    Mostly, the other rules will pick these, but in some circumstances
    with other substitutions or deletions in this area, these won't get
    picked - although we want them to. Thus, this special case preference.
    """
    special_cases = ['309.1C', '315.1C']
    # mb: mismatch block
    if len(mb) > 1:
        scores = [0] * len(mb)
        # pos: position
        # aa: alternate alignment
        for pos, aa in enumerate(mb):
            for variant in aa:
                if str(variant) in special_cases:
                    scores[pos] += 1
        if max(scores) > 0:
            # lsi: indices of lower scoring alternate alignments
            lsi = list(x for x in range(len(mb)) if scores[x] < max(scores))
            # remove low scoring alignments from the mismatch block
            # in reverse order so as to not mess up the preceding indices
            lsi.sort(reverse=True)
            for i in lsi:
                mb.pop(i)
    return mb
92152d8de90617ce4a21da680bd34e96d7df98cc
21,896
def get_patch_centered(img, x, y, radius):
    """
    Extracts a patch from an image centered at (x, y) with a given radius.
    """
    return img[x - radius : x + radius + 1, y - radius : y + radius + 1]
6ad8c8ee8b737fcbe036c8d5a1fb0b00a76e4014
21,899
def is_for_ast_eval(test_str: str):
    """
    Is the test string a valid list or dict string, such as "[1, 2]",
    that can be evaluated by ast eval.

    Arguments:
        test_str (str): Test string

    Returns:
        bool: Is test_str a valid list or dict string
    """
    return ('[' in test_str and ']' in test_str) or \
           ('{' in test_str and '}' in test_str)
e3f30a6b27f9d66e91a2c122cc9b3fc5ae1f948f
21,905
def _ap(relevances, scores, topn=None):
    """Returns the average precision (AP) of a single ranked list.

    The implementation here is copied from Equation (1.7) in
    Liu, T-Y "Learning to Rank for Information Retrieval" found at
    https://www.nowpublishers.com/article/DownloadSummary/INR-016

    Args:
        relevances: A `list` of document relevances, which are binary.
        scores: A `list` of document scores.
        topn: An `integer` specifying the number of items to be considered
            in the average precision computation.

    Returns:
        The MAP of the list as a float computed using the formula
        sum([P@k * rel for k, rel in enumerate(relevance)]) / sum(relevance)
        where P@k is the precision of the list at the cut off k.
    """
    def argsort(arr, reverse=True):
        arr_ind = sorted([(a, i) for i, a in enumerate(arr)], reverse=reverse)
        return list(zip(*arr_ind))[1]

    num_docs = len(relevances)
    if isinstance(topn, int) and topn > 0:
        num_docs = min(num_docs, topn)
    indices = argsort(scores)[:num_docs]
    ranked_relevances = [1. * relevances[i] for i in indices]
    precision = {}
    for k in range(1, num_docs + 1):
        precision[k] = sum(ranked_relevances[:k]) / k
    num_rel = sum(ranked_relevances[:num_docs])
    average_precision = sum(precision[k] * ranked_relevances[k - 1]
                            for k in precision) / num_rel if num_rel else 0
    return average_precision
e856a98630548313362aa1bf49749a0b32208e61
21,910
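A small worked example for _ap above: with relevances [1, 0, 1] ranked by descending score, AP = (1/1 + 2/3) / 2 ≈ 0.833.

relevances = [1, 0, 1]
scores = [0.9, 0.8, 0.7]  # already in rank order
print(_ap(relevances, scores))  # 0.8333333333333333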
def _get_env_var(rctx, name, default):
    """Find an environment variable in system. Doesn't %-escape the value!

    Args:
        rctx: rctx
        name: environment variable name
        default: default value to return if env var is not set in system

    Returns:
        The environment variable value or the default if it is not set
    """
    if name in rctx.os.environ:
        return rctx.os.environ[name]
    return default
1e98d7b65f1b7323caff51d897e7c5b5bedae3cf
21,913
def filter_for_sprl(c):
    """
    Given a BIDSFile object, filter for sprl type file
    """
    try:
        val = "sprlcombined" in c.entities["acquisition"]
    except KeyError:
        return False
    else:
        return val
6f05313701ecc01512fedf05709e5e13629c467d
21,920
def roll(lst, shift):
    """Roll elements of a list. This is similar to `np.roll()`"""
    return lst[-shift:] + lst[:-shift]
4805c646d4d6025b0ebcd660a020a58fb6078036
21,922
def count_distinct_occurence(calls, texts):
    """Return the count of distinct numbers occurring in the records

    Args:
        calls: list of calls
        texts: list of texts
    Returns:
        number of distinct numbers
    """
    number_set = set()
    for record in calls + texts:
        number_set.add(record[0])
        number_set.add(record[1])
    return len(number_set)
4acf40c50bbd32b23735aaad2c581559829bb664
21,923
def get(sarif_struct, *path):
    """ Get the sarif entry at PATH """
    res = sarif_struct
    for p in path:
        res = res[p]
    return res
f4c1eb9f98acb5e795d65ac427079748a9b89a6f
21,926
def swift_module_name(label):
    """Returns a module name for the given label."""
    return label.package.lstrip("//").replace("/", "_") + "_" + label.name
e10dee81c5bbd3d5a1fc15ae94aebe74e8de94c6
21,927
def remove_duplicate_values(array_like, tol=0.0):
    """
    Removes duplicate values from list (when tol=0.0) or removes
    approximately duplicate values if tol!=0.0.
    """
    unique_values = [array_like[0]]
    for element in array_like:
        element_is_duplicate = False
        for uval in unique_values:
            if abs(uval - element) <= tol:
                element_is_duplicate = True
        if not element_is_duplicate:
            unique_values.append(element)
    return unique_values
afdad5db2aa00858aa9bcd29e1b64b744b2fb963
21,932
from pathlib import Path


def create_job(
        create_job_header,
        create_job_body,
        create_job_tail,
        job_name_prefix,
        scenario_name,
        job_name_suffix,
        queue_name,
        ncores,
        work_dir,
        run_dir,
        config_file,
        ):
    """
    Create the job file.

    The job is created by assembling three parts: the job header, the body,
    and the final tail (post execution process).

    The different parameters will be injected in the respective job creation
    functions.

    Parameters
    ----------
    create_job_header : callable
        The function that will create the header.

    create_job_body : callable
        The function that will create the job body.

    create_job_tail: callable
        The function that will create the job tail.

    job_name_prefix : str
        A prefix for the job name. Normally this is the name of the job
        test case, for example the PDB ID.
        Injected in `create_job_header`.

    scenario_name : str
        The name of the benchmark scenario.
        Injected in `create_job_header`.

    job_name_suffix : str
        An additional suffix for the job name. Normally, `BM5`.
        Injected in `create_job_header`.

    queue_name : str
        The name of the queue. Injected in `create_job_header`.

    ncores : int
        The number of cpu cores to use in the jobs. Injected in
        `create_job_header`.

    work_dir : pathlib.Path
        The working dir of the example. That is, the directory where
        `input`, `jobs`, and `logs` reside. Injected in `create_job_header`.

    run_dir : pathlib.Path
        The running directory of the scenario.

    config_file : pathlib.Path
        Path to the scenario configuration file.
        Injected in `create_job_body`.

    Returns
    -------
    str
        The job file in the form of string.
    """
    # create job header
    job_name = f'{job_name_prefix}-{scenario_name}-{job_name_suffix}'
    std_out = str(Path('logs', 'haddock.out'))
    std_err = str(Path('logs', 'haddock.err'))
    job_header = create_job_header(
        job_name,
        work_dir=work_dir,
        stdout_path=std_out,
        stderr_path=std_err,
        queue=queue_name,
        ncores=ncores,
        )
    available_flag = str(Path(run_dir, 'AVAILABLE'))
    running_flag = str(Path(run_dir, 'RUNNING'))
    done_flag = str(Path(run_dir, 'DONE'))
    fail_flag = str(Path(run_dir, 'FAIL'))
    job_body = create_job_body(available_flag, running_flag, config_file)
    job_tail = create_job_tail(std_err, done_flag, fail_flag)
    return job_header + job_body + job_tail
acc38ee8dc0169173f71e1fa5f81e75b340250e1
21,934
def toUnicode(articles):
    """Convert a list of articles utf-8 encoded to unicode strings."""
    return tuple([art.decode('utf_8') for art in articles])
6924a837d5a093b3e0ea358381d67a1fc011519c
21,936
def update_parent_child_relationships(links_dict, old_id, new_id):
    """
    Update the parent-child relationships after clustering a firework
    by replacing all the instances of old_id with new_id

    Args:
        links_dict (dict): Existing parent-child relationship mapping
        old_id (int): Existing id of the firework
        new_id (int): New id of the firework

    Returns:
        links_dict (dict): Updated parent-child relationship mapping
    """
    # Enumerate child ids and replace the old id with the new id
    for parent_id in links_dict:
        child_id_list = links_dict[parent_id]
        for index, child_id in enumerate(child_id_list):
            if child_id == old_id:
                child_id_list[index] = new_id
                break
    # Replace the old parent id with the new id
    if old_id in links_dict:
        links_dict[new_id] = links_dict.pop(old_id)
    return links_dict
a08201e455ed87ebaae52542c99ff500df523367
21,938
def normalize_name(name):
    """Returns a normalized version of the given algorithm name."""
    name = name.upper()
    # BouncyCastle uses X.509 with an alias of X509, Conscrypt does the
    # reverse. X.509 is the official name of the standard, so use that.
    if name == "X509":
        name = "X.509"
    # PKCS5PADDING and PKCS7PADDING are the same thing (more accurately, PKCS#5
    # is a special case of PKCS#7), but providers are inconsistent in their
    # naming. Use PKCS5PADDING because that's what our docs have used
    # historically.
    if name.endswith("/PKCS7PADDING"):
        name = name[:-1 * len("/PKCS7PADDING")] + "/PKCS5PADDING"
    return name
77858e7217c3a17e3b041781f117e7b53dd4c57d
21,939
import torch


def upfeat(input, prob):
    """
    A function to compute pixel features from superpixel features

    Args:
        input (tensor): superpixel feature tensor.
        prob (tensor): one-hot superpixel segmentation.

    Returns:
        reconstr_feat (tensor): the pixel features.

    Shape:
        input: (B, N, C)
        prob: (B, N, H, W)
        reconstr_feat: (B, C, H, W)
    """
    B, N, H, W = prob.shape
    prob_flat = prob.view(B, N, -1)
    reconstr_feat = torch.matmul(prob_flat.permute(0, 2, 1), input)
    reconstr_feat = reconstr_feat.view(B, H, W, -1).permute(0, 3, 1, 2)
    return reconstr_feat
8b1515d7e3c7cfbf656f4e60e4c5e7899e48dbbf
21,942
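A shape-check sketch for upfeat above, with made-up dimensions (torch is imported alongside upfeat):

B, N, C, H, W = 2, 4, 8, 16, 16
sp_feats = torch.randn(B, N, C)            # one feature vector per superpixel
assignment = torch.rand(B, N, H, W)        # soft superpixel assignment
print(upfeat(sp_feats, assignment).shape)  # torch.Size([2, 8, 16, 16])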
def remote(obj):
    """ Return the remote counterpart of a local object.

    :param obj: the local object
    :return: the corresponding remote entity
    """
    try:
        return obj.__remote__
    except AttributeError:
        return None
adf85d39797c158bd16f874f6ce9cf3867d6fb8b
21,948
import requests


def download_pac(candidate_urls, timeout=1, allowed_content_types=None, session=None):
    """
    Try to download a PAC file from one of the given candidate URLs.

    :param list[str] candidate_urls: URLs that are expected to return a PAC file.
        Requests are made in order, one by one.
    :param timeout: Time to wait for host resolution and response for each URL.
        When a timeout or DNS failure occurs, the next candidate URL is tried.
    :param allowed_content_types: If the response has a ``Content-Type`` header,
        then consider the response to be a PAC file only if the header is one of
        these values. If not specified, the allowed types are
        ``application/x-ns-proxy-autoconfig`` and ``application/x-javascript-config``.
    :return: Contents of the PAC file, or `None` if no URL was successful.
    :rtype: str|None
    """
    if not allowed_content_types:
        allowed_content_types = {'application/x-ns-proxy-autoconfig',
                                 'application/x-javascript-config'}
    if not session:
        sess = requests.Session()
    else:
        sess = session
    sess.trust_env = False  # Don't inherit proxy config from environment variables.
    for pac_url in candidate_urls:
        try:
            resp = sess.get(pac_url, timeout=timeout)
            content_type = resp.headers.get('content-type', '').lower()
            if content_type and True not in [allowed_type in content_type
                                             for allowed_type in allowed_content_types]:
                continue
            if resp.ok:
                return resp.text
        except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
            continue
ba1554e01c0a3b2cf9251d686d946459aef40f2d
21,952
from io import StringIO


def list_to_string_io(list_of_entries: list):
    """
    Return file like object of the type StringIO from a given list of list of strings.

    Argument:
        - list_of_entries {list} - list of list of strings to transform to StringIO

    Example:
        [
            ['AR8IEZO1187B99055E', 'SOINLJW12A8C13314C', 'City Slickers', 2008, 149.86404],
            ['AR558FS1187FB45658', 'SOGDBUF12A8C140FAA', 'Intro', 2003, 75.67628]
        ]

    Return:
        {StringIO} - file type object with values in input list concatenated.

    Example:
        'AR8IEZO1187B99055E\\tSOINLJW12A8C13314C\\tCity Slickers\\t2008\\t149.86404\\n
         AR558FS1187FB45658\\tSOGDBUF12A8C140FAA\\tIntro\\t2003\\t75.67628'
    """
    return StringIO('\n'.join(['\t'.join([str(entry) for entry in set_of_entries])
                               for set_of_entries in list_of_entries]))
b71528876a2fd65264c1f77bdea96ab112616894
21,954
import re


def parse_quast_result(path_to_quast_result):
    """
    Args:
        path_to_quast_result (str): Path to the QUAST result file.

    Returns:
        dict: Parsed QUAST report
        For example:
        {
            "num_contigs": 72,
            "largest_contig": 692871,
            "N50": 299446,
            "L50": 6,
            "N75": 123167,
            "L75": 12,
            "total_length": 5182695,
            "percent_GC": 51.75,
            "complete_busco_percent": 100.0,
            "partial_busco_percent": 100.0
        }
    """
    quast_output = []
    with open(path_to_quast_result, 'r') as quast_result:
        for line in quast_result:
            quast_output.append(line)

    def parse_quast_report_line(line):
        """
        Takes a line of the quast report and returns the specific data that
        we're interested in from that line.

        Collapse multiple spaces into a tab char ('\\t'), then split the line
        on tabs and take the second item. Cast floats to floats and ints to ints.
        '# contigs    751   ' -> '# contigs\\t751\\t' -> ['# contigs', '751', ''] -> '751' -> 751
        """
        result_data = re.sub(' {2,}', '\t', line).split('\t')[1]
        if re.match(r'\d+\.\d+', result_data):
            return float(result_data)
        else:
            return int(result_data)

    # Associate a regex that can be used to identify the line of interest in the
    # quast report with a string to use as a key in the output dict.
    quast_report_parsing_regexes = {
        r'^# contigs {2,}\d+': 'num_contigs',
        r'^Largest contig {1,}\d+': 'largest_contig',
        r'^N50 +\d+': 'N50',
        r'^NG50 +\d+': 'NG50',
        r'^L50 +\d+': 'L50',
        r'^LG50 +\d+': 'LG50',
        r'^N75 +\d+': 'N75',
        r'^NG75 +\d+': 'NG75',
        r'^L75 +\d+': 'L75',
        r'^LG75 +\d+': 'LG75',
        r'^Total length {2,}\d+': 'total_length',
        r'^Reference length +\d+': 'reference_length',
        r'^GC \(%\) +\d+\.\d+': 'percent_GC',
        r'^Reference GC \(%\) +\d+\.\d+': 'reference_percent_GC',
        r'^Genome fraction \(%\) +\d+\.\d+': 'genome_fraction_percent',
        r'^Duplication ratio +\d+\.\d+': 'duplication_ratio',
        r'^Complete BUSCO \(\%\) +\d+\.\d+': 'complete_busco_percent',
        r'^Partial BUSCO \(\%\) +\d+\.\d+': 'partial_busco_percent',
    }

    quast_result = {}
    for line in quast_output:
        for regex, key in quast_report_parsing_regexes.items():
            if re.match(regex, line):
                quast_result[key] = parse_quast_report_line(line)

    return quast_result
d7a620a1dedbcbdf00b82a6b03c816ac963503cb
21,957
def is_digit(c) -> bool:
    """Checks if given char is a digit."""
    try:
        return ord(c) >= 48 and ord(c) <= 57
    except TypeError:
        return False
81eb6d9a3b73e567dff9b5c040309073db4ed3eb
21,958
def stations_by_river(stations):
    """Find stations which are by the same river

    Args:
        stations (list): list of MonitoringStation objects

    Returns:
        dictionary: river name as key to a list of station names
    """
    dic_stations_river = {}
    for station in stations:
        key = station.river
        if key in dic_stations_river:
            dic_stations_river[key].append(station.name)
        else:
            dic_stations_river[key] = [station.name]
    return dic_stations_river
f094ee7cae07a0b6037aac75559b68b09e88c26d
21,963
from typing import Iterable
from typing import Tuple
from typing import Any
from typing import List


def unzip(tuples: Iterable[Tuple[Any, ...]]) -> List[Iterable[Any]]:
    """The inverse of the `zip` built-in function."""
    return [list(x) for x in zip(*tuples)]
10dd4755c501f64f6b98dea8abd7677d6fa23535
21,964
def clean_unicode(text):
    """A function to clean unseen unicode characters like \\u2xxx

    Args:
        text (str): Can be news title or news body

    Returns:
        [str]: A unicode-free string
    """
    clean_text = text.encode("ascii", errors="replace").strip().decode("ascii")
    clean_text = clean_text.replace("?", ' ')
    return clean_text
ed62e2644818120ea8417e2549bac76870447e55
21,967
import math


def make_divisible(x, divisor, ceil=True):
    """
    Returns x evenly divisible by divisor.
    If ceil=True it will return the closest larger number to the original x,
    and ceil=False the closest smaller number.
    """
    if ceil:
        return math.ceil(x / divisor) * divisor
    else:
        return math.floor(x / divisor) * divisor
02df42d8c490ac0c85d7ad983c0c9c838bdfa088
21,968
def color_mapping_func(labels, mapping):
    """Maps a label (integer or string) to a color"""
    color_list = [mapping[value] for value in labels]
    return color_list
da1f4f5432a28ef972bdcd74bd351ab4d5d09aad
21,969
def bytes_to_str(b):
    """Converts a byte string argument to a string"""
    if isinstance(b, str):
        return b
    return str(b, 'latin1')
1494de161b7a9b4ef7743627bbbeba7624c7fa1c
21,975
def _dict_key_to_key(dictionary):
    """Creates a dummy map from the nominal key to the nominal key"""
    return {key: key for key in dictionary.keys()}
9feed672c1f678ed58a4d29fea5fbb0848b8b483
21,981
def seconds_to_nanoseconds(seconds):
    """Convert the specified number of seconds to nanoseconds

    :param seconds: an integer representing a certain number of seconds
    :returns: an integer (or float) representation of the specified number
        of seconds as nanoseconds
    """
    return seconds * 1000000000
d9f4687335b263a73f7f22065bfbbebb12468ce3
21,982
def get_board_copy(board):
    """Make a duplicate of the board list and return the duplicate."""
    return list(board)
36aa2bd0719dc73602fd1fda68b120ea9c6a4a4c
21,984
def map_challenge_set(x):
    """
    Map a SQLAlchemy ChallengeSet model object into a generic map.

    :param x: SQLAlchemy ChallengeSet model.
    :return: Generic map containing relevant details for REST API.
    """
    return {"id": x.id, "slug": x.slug, "name": x.name, "description": x.description}
5a70a8d14a486c9ee57475d8b4c11b8d956430b1
21,985
import requests


def get_session(username, password):
    """
    Returns a request session for the SANtricity RestAPI Webserver
    """
    request_session = requests.Session()
    # Default credentials
    request_session.auth = (username, password)
    request_session.headers = {'Accept': 'application/json',
                               'Content-Type': 'application/json'}
    # Ignore the self-signed certificate issues for https
    request_session.verify = False
    return request_session
42f896195f714bad3928146b49bac88564b4a477
21,986
from typing import List
import re


def lexLine(line: str) -> List[str]:
    """Tokenizes a code line

    Args:
        line (str): A line of VLang code

    Returns:
        List(str): A list of tokenized strings
    """
    if line == "\n":
        return ["\n"]
    elif line.startswith('#'):
        return ["#"]
    splittedList = re.split(r'\s|(\")', line)  # Splits the line based on quotes or whitespace
    filteredList = filter(None, splittedList)  # Filters None's out of the list. These occur due to the regex expression above
    return list(filteredList)
3f8c3633300275e43c93381c91202ac02aeca01b
21,987
def parse_http_header(header, header_key):
    """
    **Parse HTTP header value**

    Parse the value of a specific header from a RAW HTTP response.

    :param header: String containing the RAW HTTP response and headers
    :type header: str
    :param header_key: The header name of which to extract a value from
    :type header_key: str
    :return: The value of the header
    :rtype: str
    """
    split_headers = header.split('\r\n')
    for entry in split_headers:
        header = entry.strip().split(':', 1)
        if header[0].strip().lower() == header_key.strip().lower():
            return ''.join(header[1::]).split()[0]
993b8190d631accf7c63e259aa5f4f4c4b657c0e
21,988
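A usage sketch for parse_http_header above. Note that the trailing .split()[0] keeps only the first whitespace-separated token of the header value:

raw = "HTTP/1.1 200 OK\r\nContent-Type: text/html; charset=utf-8\r\nServer: nginx\r\n\r\n"
print(parse_http_header(raw, "Content-Type"))  # text/html;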
def is_dictable(obj):
    """Returns ``True`` if `obj` has a ``to_dict()`` method."""
    return hasattr(obj, "to_dict")
0a12ccae0a0d3242db0bb7ad6537f2ff34ee7c48
21,994
def _mean(values: list) -> float:
    """ Return mean """
    return sum(values) * 1.0 / len(values)
dc433357122e84523200a6e932c787f96ff66185
21,995
def get_name(i):
    """ Return the name of the i-th component of a sensor sample """
    assert i >= 0 and i <= 5, f"Component {i} is not supported, must be between 0 and 5"
    names = ["x_acc", "y_acc", "z_acc", "x_gyro", "y_gyro", "z_gyro"]
    return names[i]
9775b41ecfb28a5cefaee0bbf83569f9d115d4ea
21,996
from datetime import datetime


def datetime_to_str(dt: datetime) -> str:
    """Get a timestamp string from a datetime object.

    Args:
        dt: The datetime object.

    Returns:
        The timestamp string.
    """
    return dt.strftime("%Y_%m_%d_%H_%M_%S")
9b604c18e648ce6783cd0ff8897842a8576225ab
22,003
import errno


def _retry_if_file_already_exists(exception):
    """Retry if a file-already-exists exception was raised."""
    return (
        isinstance(exception, OSError)
        and exception.errno == errno.EEXIST
    )
d0094c155cc43f8172f85b1aa1fa42d7348330c2
22,005
def no_deprecated_adapter(adapter):
    """Modify an adapter to disable deprecated symbols.

    ``no_deprecated_adapter(adapter)(name, active, section)`` is like
    ``adapter(name, active, section)``, but unsets all deprecated symbols
    and sets ``MBEDTLS_DEPRECATED_REMOVED``.
    """
    def continuation(name, active, section):
        if name == 'MBEDTLS_DEPRECATED_REMOVED':
            return True
        if adapter is None:
            return active
        return adapter(name, active, section)
    return continuation
2c18bf3d059a1a2d7555f09662509688023b61ed
22,007