Columns: content (string, 35 to 416k chars) · sha1 (string, 40 chars) · id (int64, 0 to 710k)
def get_tag_name(tag):
    """
    Extract the name portion of a tag URI.

    Parameters
    ----------
    tag : str

    Returns
    -------
    str
    """
    return tag[tag.rfind("/") + 1:tag.rfind("-")]
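# A minimal usage sketch; the ASDF-style tag URI below is an assumed example,
# not part of this record.
assert get_tag_name("tag:stsci.edu:asdf/core/asdf-1.0.0") == "asdf"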
e24f0ae84ed096ec71f860291d1e476c75bf8370
704,781
def cbar(ni, nj, resources, commcost):
    """ Average communication cost """
    n = len(resources)
    if n == 1:
        return 0
    npairs = n * (n - 1)
    return 1. * sum(commcost(ni, nj, a1, a2)
                    for a1 in resources.values()
                    for a2 in resources.values()
                    if a1 != a2) / npairs
b215de30bcb019e2299edbb61591b7a1c129c58b
704,782
def imap_any(conditions):
    """ Generate an IMAP query expression that will match any of the
    expressions in `conditions`.

    In IMAP, both operands used by the OR operator appear after the OR, and
    chaining ORs can create very verbose, hard to parse queries, e.g.
    "OR OR OR X-GM-THRID 111 X-GM-THRID 222 OR X-GM-THRID 333 X-GM-THRID 444
    X-GM-THRID 555". Using logical equivalence, a functionally identical
    query can be built with "AND" and "NOT";
    (a || b || c...) == !(!a && !b && !c...).

    Arguments:
        conditions: List of IMAP expressions.

    Returns:
        An IMAP expression that evaluates to "true" if any of the conditions
        are true.
    """
    if not conditions:
        return ""
    negations = [("NOT %s" % condition) for condition in conditions]
    return "(NOT (%s))" % " ".join(negations)
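# A minimal usage sketch; the thread IDs are assumed example values.
assert imap_any(["X-GM-THRID 111", "X-GM-THRID 222"]) == \
    "(NOT (NOT X-GM-THRID 111 NOT X-GM-THRID 222))"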
de4ef1680cd2c8370d82640ff95186ed3ea81202
704,783
def format_sources(sources):
    """ Make a comma separated string of news source labels. """
    formatted_sources = ""
    for source in sources:
        formatted_sources += source["value"] + ','
    return formatted_sources
f9f86f11e4dfe9ecd3fbbd5e14d3ca750a4e1a5a
704,784
def _get_file_url_from_dropbox(dropbox_url, filename):
    """Dropbox now supports modifying the shareable url with a simple param
    that will allow the tool to start downloading immediately.
    """
    return dropbox_url + '?dl=1'
fe0256ae747826dbbe5ac3c3a4afa42e0584699a
704,785
def launch_coef_scores(args):
    """
    Wrapper to compute the standardized scores of the regression
    coefficients, used when computing the number of features in the reduced
    parameter set.

    @param args: Tuple containing the instance of SupervisedPCABase, feature
        matrix and response array.
    @return: The standardized scores of the coefficients.
    """
    spca, X, y = args
    scoefs = spca._compute_stnd_coefs(X, y)
    return scoefs
02423ef564b55dfcc37bddadcc813edffba05795
704,786
def create_link(url):
    """Create an html link for the given url"""
    return f'<a href = "{url}" target="_blank">{url}</a>'
77a5375369be2be140a69a4521c50a92cee2d5ed
704,787
def cummean(x):
    """Return a same-length array, containing the cumulative mean."""
    return x.expanding().mean()
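# A minimal usage sketch; assumes x is a pandas Series or DataFrame, since
# .expanding() is the pandas expanding-window API.
import pandas as pd
assert cummean(pd.Series([1, 2, 3])).tolist() == [1.0, 1.5, 2.0]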
b5a35c56cb78e0588dd5be64a75384c4cd81ccb5
704,788
def get_syntax_errors(graph):
    """List the syntax errors encountered during compilation of a BEL script.

    Uses SyntaxError as a stand-in for :exc:`pybel.parser.exc.BelSyntaxError`

    :param pybel.BELGraph graph: A BEL graph
    :return: A list of 4-tuples of line number, line text, exception, and
        annotations present in the parser
    :rtype: list[tuple]
    """
    return [
        (number, line, exc, an)
        for number, line, exc, an in graph.warnings
        if isinstance(exc, SyntaxError)
    ]
a0f3493b88b081de3613397c997d71dabdae78be
704,789
def is_valid_combination(row):
    """
    Should return True if combination is valid and False otherwise.

    The test row that is passed here can be incomplete. To prevent searching
    for unnecessary items, the filtering function is executed with the found
    subset of data to validate it.
    """
    n = len(row)
    if n > 1:
        # Brand Y does not support Windows 98
        if "98" == row[1] and "Brand Y" == row[0]:
            return False
        # Brand X does not work with XP
        if "XP" == row[1] and "Brand X" == row[0]:
            return False
    if n > 4:
        # Contractors are billed in 30 min increments
        if "Contr." == row[3] and row[4] < 30:
            return False
    return True
c0758c3d30debbd3fc3d5f07d6728c23bfb71145
704,790
import os


def get_out_name(subdataset_name_tuple, out_ext=""):
    """
    Get output file name for sub dataset

    Takes tuple with (subdataset name, description)
    """
    subdataset_name = subdataset_name_tuple[0]
    outname = os.path.split(subdataset_name)[-1]
    outname = outname.replace(".xml", "")
    outname = outname.replace(":", "_")
    # Get UTM string from description.
    utm_str = subdataset_name_tuple[1].split(",")[-1].replace(" ", "")
    outname = "_".join(outname.split("_")[:-2])
    outname = "{}_{}".format(outname, utm_str)
    outname = outname + out_ext
    return outname
d55b77546e579f1f0887f4e88df2ec9825991c9f
704,791
def get_callback(request, spider):
    """Get request.callback of a scrapy.Request, as a callable."""
    if request.callback is None:
        return getattr(spider, 'parse')
    return request.callback
a1f62822d812bebdeabafa14edda4462949657d8
704,793
import requests


def veryrandom(msg, min=1, max=6, base=10, num=1):
    """The data generated by veryrandom comes from random.org, which is an
    additional guarantee of the randomness of the results. A random number
    between the two given bounds, both inclusive, will be obtained.
    """
    url = 'http://www.random.org/integers/'
    try:
        data = requests.get(url, params={
            'min': min,
            'max': max,
            'base': base,
            'num': num,
            'col': 1,
            'format': 'plain',
            'rdn': 'new',
        }).text
    except Exception as e:
        data = str(e)
    return data.replace('\n', ' ')
75320750423e060ba117ed8ce6bfb84a02d16411
704,794
from math import log


def calcShannonEnt(dataSet):
    """
    Compute the Shannon entropy of a dataset, used to evaluate splits.

    Parameters
    ----------
    dataSet: the dataset (either the original dataset or a partitioned subset)

    Returns
    -------
    The information entropy of the dataset, determined by the class labels.
    """
    numEntries = len(dataSet)  # total number of samples
    labelCounts = {}  # dictionary counting occurrences of each class
    for featVec in dataSet.values:  # take one row at a time
        currentLabel = featVec[-1]  # by convention, the last column holds the class label
        if currentLabel not in labelCounts.keys():
            labelCounts[currentLabel] = 0.0  # add unseen classes, initialised to 0
        labelCounts[currentLabel] += 1  # increment the count
    # print(labelCounts)
    shannonEnt = 0.0  # accumulate the entropy from 0
    for key in labelCounts:  # iterate over the keys of the dictionary
        prob = float(labelCounts[key]) / numEntries  # proportion P_i of class i samples
        shannonEnt -= prob * log(prob, 2)  # base-2 logarithm
    return shannonEnt
bdade097a799feff0cb41eb15b3438ca1e088983
704,795
def merge_values(list1, list2):
    """Merge two selection value lists and dedup.

    All selection values should be simple value types.
    """
    tmp = list1[:]
    if not tmp:
        return list2
    else:
        tmp.extend(list2)
    return list(set(tmp))
9412dd28c6110bc6df70ac7d563cb19d1211beb8
704,798
import argparse


def parse_arguments():
    """Parse the command line arguments.

    Returns:
        Parsed arguments.
    """
    parser = argparse.ArgumentParser(description='tail for BAMs')
    parser.add_argument(
        'filenames',
        help='BAMs on which to perform the tail operation',
        nargs='*',
        metavar='FILE'
    )
    parser.add_argument(
        '--version', '-v',
        help='print the version',
        action='store_true'
    )
    return parser.parse_args()
4c09fecf32bcc5a4d016d012a87996f5b08b765f
704,799
def format_heading(level, text):
    """Create a heading of <level> [1, 2 or 3 supported]."""
    underlining = ['=', '-', '~', ][level - 1] * len(text)
    return '%s\n%s\n\n' % (text, underlining)
6b8caaa134ddc32666a4d7ce62a775d6ffda7425
704,800
def get_new_size_zoom(current_size, target_size):
    """
    Returns size (width, height) to scale image so smallest dimension fits
    target size.
    """
    scale_w = target_size[0] / current_size[0]
    scale_h = target_size[1] / current_size[1]
    scale_by = max(scale_w, scale_h)
    return (int(current_size[0] * scale_by), int(current_size[1] * scale_by))
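# A minimal usage sketch with assumed example sizes: a 400x200 image zoomed
# to cover a 100x100 target keeps the larger of the two scale factors (0.5).
assert get_new_size_zoom((400, 200), (100, 100)) == (200, 100)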
e0b42eab3d35ba5c662282cab1ffa798327ad92a
704,801
def get_name_component(x509_name, component):
    """Gets single name component from X509 name."""
    value = ""
    for c in x509_name.get_components():
        if c[0] == component:
            value = c[1]
    return value
6a473a96b99daa6f69fd6aac45f2594af933d4bd
704,802
def song_line(line):
    """Parse one line

    Parameters
    ----------
    line: str
        One line in the musixmatch dataset

    Returns
    -------
    dict
        track_id: Million song dataset track id, track_id_musixmatch:
        Musixmatch track id and bag_of_words: Bag of words dict in
        {word: count} format

    Notes
    -----
    Musixmatch starts words at index 1, we are shifting it so it starts at 0
    """
    elements = line.split(',')
    track_id = elements[0]
    track_id_musixmatch = elements[1]
    bag_of_words = [s.split(':') for s in elements[2:]]
    # shift index so it starts at zero
    bag_of_words_dict = {int(idx) - 1: int(count) for idx, count in bag_of_words}
    return dict(track_id=track_id,
                track_id_musixmatch=track_id_musixmatch,
                bag_of_words=bag_of_words_dict)
2108dfa037aa6293a0b3111a97c354e62c0dd2a5
704,803
def remove_st_less_than(dataframe, column='ST', less_than=0.001):
    """
    Remove any entry with an ST less than specified

    Args:
        dataframe (pandas.Dataframe): dataframe containing sensitivity
            analysis output
        column (str): Column name, default is 'ST'
        less_than (float): Remove anything less than this

    Returns:
        New dataframe.
    """
    new_df = dataframe[dataframe[column] > less_than]
    return new_df
2ba052004c436f8d527ab9b5bc3e76c90aa5dce9
704,804
def count_positives_sum_negatives2(arr):
    """ More space efficient, but not as concise as above """
    if not arr:
        return arr
    count = 0
    total = 0
    for num in arr:
        if num > 0:
            count += 1
        else:
            total += num
    return [count, total]
f1f8de28dcf6669d49044a43e6a8603f0da01577
704,805
def fact(n):
    """Return the factorial of the given number."""
    r = 1
    while n > 0:
        r = r * n
        n = n - 1
    return r
7bdcdc759b49a9cd72f7bf3f12a18fc03ce50668
704,806
from datetime import datetime
import os
from shutil import copyfile


def create_logdir(directory, algorithm, env_name, config_path):
    """
    Create a directory inside the specified directory for logging experiment
    results and return its path name. Include the environment name (Sharpe,
    etc.) in the directory name, and also copy the config.
    """
    experiment_dir = f"{directory}/{algorithm}-{env_name}-{datetime.now():%Y-%m-%d_%H:%M:%S}"
    if not os.path.exists(experiment_dir):
        os.makedirs(experiment_dir)
    if config_path is not None:
        copyfile(config_path, f"{experiment_dir}/config.yml")
    return experiment_dir
9b1e188e2925dec49dad4c31bba8eec9ab0ddbd0
704,807
def list_column(column, original_name):
    """Get all non nan values from column."""
    if original_name == 'original_formatted':
        list_filled = [[x for x in row if str(x) != 'nan'] for row in column]
    else:
        list_filled = [[_] for _ in column]
    return list_filled
ae8b900cdcf3e59ff4ede3b3dc0321d93ea07253
704,809
def apply_functions(lst, functions):
    """
    :param lst: list of values
    :param functions: list of functions to apply to each value. Each function
        has 2 inputs: index of value and value
    :return: [func(x) for x in lst], i.e. apply the respective function to
        each of the values
    """
    assert len(lst) == len(functions)
    for i, item in enumerate(lst):
        func = functions[i]  # get function
        lst[i] = func(i, item)  # apply function
    return lst
679a2219008e438249e1227d5aab6529019c497c
704,811
def location_normalize(location):
    """
    Normalize location name `location`
    """
    # translation_table = dict.fromkeys(map(ord, '!@#$*;'), None)
    def _remove_chars(chars, string):
        return ''.join(x for x in string if x not in chars)

    location = location.lower().replace('_', ' ').replace('+', ' ').strip()
    if not location.startswith('moon@'):
        location = _remove_chars(r'!@#$*;:\\', location)
    return location
4b8aaec23ef9763bc02fd4d3635ba930f8c3dcea
704,812
def dict_is_test(data):
    """helper function to check whether passed argument is a proper
    :class:`dict` object describing a test.

    :param dict data: value to check
    :rtype: bool
    """
    return (
        isinstance(data, dict)
        and "type" in data
        and data["type"] == "test"
        and "id" in data
        and "attributes" in data
        and isinstance(data["attributes"], dict)
    )  # optionally, it can have "links" dict
320b47f8f41f42f6a6554741c9b2de38b370605a
704,813
from typing import Set


def GetZonesInRegion(region: str) -> Set[str]:
    """Returns a set of zones in the region."""
    # As of 2021 all Azure AZs are numbered 1-3 for eligible regions.
    return set([f'{region}-{i}' for i in range(1, 4)])
e539662604eb5da2583630844dd54d11d266c827
704,814
def compare_3PC_keys(key1, key2) -> int:
    """
    Return >0 if key2 is greater than key1, <0 if lesser, 0 otherwise
    """
    if key1[0] == key2[0]:
        return key2[1] - key1[1]
    else:
        return key2[0] - key1[0]
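# A minimal usage sketch; the (view_no, seq_no) pairs are assumed examples.
assert compare_3PC_keys((1, 5), (1, 7)) == 2   # same view: sequence numbers decide
assert compare_3PC_keys((2, 5), (1, 9)) == -1  # different views: view number decides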
d134eaaa4ef8f218164be4e5bc6fced01c3de7eb
704,815
def _GetMSBuildToolSettings(msbuild_settings, tool):
    """Returns an MSBuild tool dictionary. Creates it if needed."""
    return msbuild_settings.setdefault(tool.msbuild_name, {})
3a2cb3e9c8910a6901be0937be18aac00d532e2b
704,816
import struct


def float_2_bytes(f, is_little_endian=False):
    """
    :param f: float value to pack
    :param is_little_endian: whether to use little-endian byte order
    :return: the 4-byte packed representation
    """
    # return little-endian data
    if is_little_endian:
        return struct.pack('<f', f)
    # return big-endian data
    return struct.pack('>f', f)
cd13cfc7179baf28cda669b2c6fccdce58c63730
704,817
import requests


def load_SMI_data_from_Helmholtz():
    """
    Downloading SMI topsoil data from helmholtz website
    """
    url = "https://www.ufz.de/export/data/2/237851_SMI_L02_Oberboden_monatlich_1951_2018_inv.zip"  # noqa: E501
    response = requests.get(url)
    return response
854f6b2f398764b7afc89cc3b9ca3b95b2d9a345
704,818
import os


def find_wfss_config_filename(pathname, instrument, filtername, mode):
    """Construct the name of the WFSS configuration file given instrument
    parameters

    Parameters
    ----------
    pathname : str
        Path where the configuration files are located

    instrument : str
        Instrument name (e.g. 'nircam')

    filtername : str
        Name of the crossing filter (e.g. 'f444w')

    mode : str
        String containing dispersion direction ('R' or 'C') as well as module
        name (for NIRCam). e.g. for NIRCam - modA_R and for NIRISS GR150R

    Returns
    -------
    config : str
        Full path and filename of the appropriate configuration file
    """
    config = os.path.join(pathname, "{}_{}_{}.conf".format(instrument.upper(),
                                                           filtername.upper(),
                                                           mode))
    return config
eb0b7cca8e0715d28dc16bd90b90d357ea612cba
704,819
def convert_to_date(col):
    """Convert datetime to date."""
    return col.date()
c6ac8febf4751e8f2c2c27fc740de286f2870cbe
704,820
import re
from collections import Counter


def perfect_match(_clipped, _seq, offset=0, min_nt=2):
    """
    perfect match between clipped reads and breakend sequence for whole length

    min_nt : minimal nt match; it is better to be larger than 2:
        a 1 nt match has a 25% chance of occurring at random,
        a 2 nt match a 0.0625 chance, and a 3 nt match a 0.0156 chance.
    """
    # note: the original imported Counter from typing, which cannot be
    # instantiated; collections.Counter is the callable class used below
    if offset > 0:
        m = [re.search(_x[offset:], _seq) for _x in _clipped
             if (len(_x) > offset) and (len(_x) >= min_nt)]
    else:
        m = [re.search(_x, _seq) for _x in _clipped if (len(_x) >= min_nt)]
    mm = [i.start() for i in m if i is not None]
    if mm:
        # get the most common split point
        return Counter(mm).most_common()[0]
    else:
        return (0, 0)
806c0b86d3f97b71c191a2c95fb77ef16bf804e2
704,821
import argparse


def get_input_args():
    """
    Retrieves and parses the command line arguments provided by the user when
    they run the program from a terminal window. If the user fails to provide
    some or all of the arguments, then the default values are used for the
    missing arguments.

    Returns:
        parse_args() - data structure that stores the command line arguments
        object
    """
    # Create parser
    parser = argparse.ArgumentParser(description='Retrieving inputs from user')
    # Create command line arguments
    parser.add_argument('data_directory', type=str, default='./',
                        help='path to the data directory (default: ./)')
    parser.add_argument('--save_dir', type=str, default='./',
                        help='path to the folder to save checkpoint file (default: ./)')
    parser.add_argument('--arch', type=str, default='VGG16',
                        help='CNN Model Architecture: vgg16, alexnet or densenet161 (default: VGG16)')
    parser.add_argument('--learning_rate', type=float, default=0.002,
                        help='Learning rate (default: 0.002)')
    parser.add_argument('--epochs', type=int, default=1,
                        help='Epochs (default: 1)')
    parser.add_argument('--dropout', type=float, default=0.1,
                        help='Dropout (default: 0.1)')
    return parser.parse_args()
830f31adf1770ebcfaeb6645ce606c758f38bd0f
704,822
import torch


def accuracy(output, target):
    """Computes the accuracy over the top predictions"""
    with torch.no_grad():
        batch_size = target.size(0)
        _, preds = torch.max(output.data, 1)
        correct = (preds == target).sum().item()
        return correct / batch_size
8f4dfde0e00f12d889b403265d50a379930ba3c8
704,823
from bs4 import BeautifulSoup


def get_absolute_url(body_string: str):
    """Get absolute manga mangadex url"""
    parser = BeautifulSoup(body_string, 'html.parser')
    for link_elements in parser.find_all('link'):
        # aiming for canonical link
        try:
            rel = link_elements.attrs['rel']
            href = link_elements.attrs['href']
        except KeyError:
            continue
        else:
            if 'canonical' in rel:
                return href
            else:
                continue
35ca71dba04c243ab7c4cfa4893326ddeb480336
704,824
from typing import Any


def isstring(var: Any, raise_error: bool = False) -> bool:
    """Check if var is a string

    Args:
        var (str): variable to check
        raise_error (bool, optional): TypeError raised if set to `True`.
            Defaults to `False`.

    Raises:
        TypeError: raised if var is not string

    Returns:
        bool: `True` if var is a string
    """
    is_ = isinstance(var, str)
    if not is_ and raise_error:
        raise TypeError(f'String expected: {var=} is not a str')
    return is_
897c43539099c3d0b9b38abccce88869a90b9d9e
704,826
def placeholder(value, token):
    """
    Add placeholder attribute, esp. for form inputs and textareas
    """
    value.field.widget.attrs["placeholder"] = token
    return value
16bb46a6e92c3a59972589ed28315e681a7580f3
704,827
def _cs_count_top_bottom(fragments):
    """Counting: top and bottom of the entire core sample"""
    cs_top, cs_bottom = 1e10, 0
    for fragment in fragments:
        cs_top = min(cs_top, float(fragment['top']))
        cs_bottom = max(cs_bottom, float(fragment['bottom']))
    return cs_top, cs_bottom
3b9a98993a837ff7c08980f644abbb6aad13f908
704,828
def get_index_from_filename(file_name: str) -> str:
    """
    Returns the index of chart from a reproducible JSON filename.

    :param file_name: `str` The name of the file without parent path.
    :returns: `str` The index of the chart (e.g., 1) or an empty string.
    """
    assembled_index = ""
    for name_char in file_name.replace(".json", "")[::-1]:
        if name_char.isnumeric():
            assembled_index += name_char
        else:
            break
    return assembled_index[::-1]
2cddcbcd9bf5079d58c75f19b5d2bf5b44ded173
704,829
def notas(*num, sit=False):
    """
    This function builds a dictionary holding several pieces of information
    about a student's report card.

    :param num: list of the student's grades
    :param sit: situation of the student (approved, failed, recovery)
    :return: the complete dictionary
    """
    boletim = {}
    boletim['Quantidade de notas'] = len(num)
    boletim['Maior'] = max(num)
    boletim['Menor'] = min(num)
    boletim['Média do aluno'] = sum(num) / len(num)
    if sit:
        if boletim['Média do aluno'] >= 7:
            boletim['Situação'] = 'APROVADO'
        elif boletim['Média do aluno'] < 6:
            boletim['Situação'] = 'REPROVADO'
        else:
            boletim['Situação'] = 'RECUPERAÇÃO'
    return boletim
a154d39be15018ce764e71c7bc97d9b4b25575df
704,830
def summarize(text):
    """ Summarizes some text """
    if len(text) > 20:
        summary = text[0:10] + " ... " + text[-10:]
    else:
        summary = text
    summary = summary.replace("\n", "\\n")
    return summary
5c37f7a50e2b533bf3e05b598ce68b2c4de88fe1
704,831
def sexastr2deci(sexa_str):
    """Converts a sexagesimal string to decimal

    Converts a given sexagesimal string to its decimal value

    Args:
        A string encoding of a sexagesimal value, with the various
        components separated by colons

    Returns:
        A decimal value corresponding to the sexagesimal string

    Examples:
        >>> sexastr2deci('15:30:00')
        15.5
        >>> sexastr2deci('-15:30:45')
        -15.5125
    """
    if sexa_str[0] == '-':
        sgn = -1.0
        dms = sexa_str[1:].split(':')  # dms = degree minute second
    else:
        sgn = 1.0
        dms = sexa_str.split(':')
    decival = 0
    for i in range(0, len(dms)):
        decival = decival + float(dms[i]) / (60.0 ** i)
    return decival * sgn
46a9d8752b05b1579ecc2b85d94c28613a08ab3c
704,832
def reverb2mix_transcript_parse(path):
    """
    Parse the file format of the MLF files that contains the transcripts in
    the REVERB challenge dataset
    """
    utterances = {}
    with open(path, "r") as f:
        everything = f.read()
        all_utt = everything.split("\n.\n")
        for i, utt in enumerate(all_utt):
            if i == 0:
                assert utt[:7] == "#!MLF!#"
                utt = utt[7:]
            words = utt.split("\n")
            label = words[0][4:-6]
            sentence = " ".join(words[1:])
            speaker = label[:-5]
            utterance = label[-5:]
            utterances[label] = {
                "utterance_id": utterance,
                "speaker_id": speaker,
                "transcript": sentence,
            }
    return utterances
c8a1aa0c8a4d0dec6626cf8e9d2491336ee42d5a
704,833
def extract_qa_bits(qa_band, start_bit, end_bit):
    """Extracts the QA bitmask values for a specified bitmask (starting and
    ending bit).

    Parameters
    ----------
    qa_band : numpy array
        Array containing the raw QA values (base-2) for all bitmasks.

    start_bit : int
        First bit in the bitmask.

    end_bit : int
        Last bit in the bitmask.

    Returns
    -------
    qa_values : numpy array
        Array containing the extracted QA values (base-10) for the bitmask.
    """
    # Initialize QA bit string/pattern to check QA band against
    qa_bits = 0
    # Add each specified QA bit flag value/string/pattern
    # to the QA bits to check/extract
    for bit in range(start_bit, end_bit + 1):
        qa_bits += 2 ** bit  # fixed: the original `bit ** 2` does not build a bitmask
    # Check QA band against specified QA bits to see what
    # QA flag values are set
    qa_flags_set = qa_band & qa_bits
    # Get base-10 value that matches bitmask documentation
    # (0-1 for single bit, 0-3 for 2 bits, or 0-2^N for N bits)
    qa_values = qa_flags_set >> start_bit
    return qa_values
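# A minimal usage sketch with an assumed raw QA value (works on plain ints as
# well as numpy arrays): extracting bits 2-3 of 0b101100 masks 0b1100 and
# shifts right, giving 3.
assert extract_qa_bits(0b101100, 2, 3) == 3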
523dc1ee149af5c5e9a494b5fe3a3c14bc3186d2
704,834
import logging


def combine_ecgs_and_clinical_parameters(ecgs, clinical_parameters):
    """
    Combines ECGs and their corresponding clinical parameters

    :param ecgs: List of ECGs
    :param clinical_parameters: Corresponding clinical parameters
    :return: Medical data for each patient including ECGs and the patients
        clinical parameters
    """
    combined = {}
    for record_id in ecgs:
        ecg = ecgs[record_id]
        try:
            cp = clinical_parameters[record_id]
        except KeyError:
            logging.warning(
                'No clinical parameters available in datapipeline for record '
                '"{}". Skipping record.'.format(record_id))
            continue
        combined[record_id] = dict(ecg)
        combined[record_id].update(cp)
    return combined
91ca17f62ff36776980a74ecd5334ae9a55ade18
704,835
def get_question_types(container):
    """ SELECTOR FOR RETURNING QUESTION TYPES """
    return container.keys()
0c667e893323c106319038d19f333d233a5d1e07
704,836
import requests
import urllib3


def uploader(dict_in):
    """Post the global dictionary to the server

    This function contains a post request to the flask server. The global
    dictionary in the patient GUI that saved all the information to be
    uploaded will be posted. This function will also catch the case where
    the server cannot be found or has broken down.

    Args:
        dict_in (dict): The global dictionary in the client_gui that stores
            all the information to be sent to the server and saved in mongoDB

    Returns:
        str: "Uploaded" if the request is successfully made and "failed" if
        there is something wrong with the connection to the server
    """
    try:
        r = requests.post("http://127.0.0.1:5000/api/upload", json=dict_in)
        print(r.status_code)
        print(r.text)
        return "Uploaded"
    except (requests.exceptions.ConnectionError,
            ConnectionRefusedError,
            urllib3.exceptions.NewConnectionError,
            urllib3.exceptions.MaxRetryError):
        return "failed"
a454bb7e0523e48d851efb2a46d877aea12dd38e
704,837
def architecture_is_32bit(arch):
    """
    Check if the architecture specified in *arch* is 32-bit.

    :param str arch: The value to check.
    :rtype: bool
    """
    return bool(arch.lower() in ('i386', 'i686', 'x86'))
a0cfaef4b03bc8cf335f0d19a3e46457db7574a9
704,838
def old_func4(self, x):
    """Summary.

    Further info.
    """
    return x
7417bc8b52ec36a510a73cc8669a92b3603e6169
704,839
def generate_config(context):
    """ Entry point for the deployment resources. """
    resources = []
    project_id = context.env['project']
    bucket_name = context.properties.get('name') or context.env['name']

    # output variables
    bucket_selflink = '$(ref.{}.selfLink)'.format(bucket_name)
    bucket_uri = 'gs://' + bucket_name + '/'

    bucket = {
        'name': bucket_name,
        'type': 'storage.v1.bucket',
        'properties': {
            'project': project_id,
            'name': bucket_name
        }
    }

    optional_props = [
        'location',
        'versioning',
        'storageClass',
        'predefinedAcl',
        'predefinedDefaultObjectAcl',
        'logging',
        'lifecycle',
        'labels',
        'website'
    ]

    for prop in optional_props:
        if prop in context.properties:
            bucket['properties'][prop] = context.properties[prop]

    resources.append(bucket)

    # If IAM policy bindings are defined then those bindings need to be applied
    storage_provider_type = 'gcp-types/storage-v1:storage.buckets.setIamPolicy'
    bindings = context.properties.get('bindings', [])
    if bindings:
        iam_policy = {
            'name': bucket_name + '-iampolicy',
            'action': (storage_provider_type),
            'properties': {
                'bucket': '$(ref.' + bucket_name + '.name)',
                'project': project_id,
                'bindings': bindings
            }
        }
        resources.append(iam_policy)

    return {
        'resources': resources,
        'outputs': [
            {
                'name': 'storageBucketSelfLink',
                'value': bucket_selflink
            },
            {
                'name': 'storageBucketURL',
                'value': bucket_uri
            }
        ]
    }
5ceca9cf90b5435368ffdb9bdcf1532eec31ec64
704,840
def tw_mock():
    """Returns a mock terminal writer"""

    class TWMock:
        WRITE = object()

        def __init__(self):
            self.lines = []
            self.is_writing = False

        def sep(self, sep, line=None):
            self.lines.append((sep, line))

        def write(self, msg, **kw):
            self.lines.append((TWMock.WRITE, msg))

        def _write_source(self, lines, indents=()):
            if not indents:
                indents = [""] * len(lines)
            for indent, line in zip(indents, lines):
                self.line(indent + line)

        def line(self, line, **kw):
            self.lines.append(line)

        def markup(self, text, **kw):
            return text

        def get_write_msg(self, idx):
            flag, msg = self.lines[idx]
            assert flag == TWMock.WRITE
            return msg

        fullwidth = 80

    return TWMock()
a843503d3e360ed4412a020a4ec37f302ec4edaa
704,841
def matsubtraction(A, B):
    """
    Subtracts matrix B from matrix A and returns the difference

    :param A: The first matrix
    :param B: The second matrix
    :return: Matrix difference
    """
    if len(A) != len(B) or len(A[0]) != len(B[0]):
        return "Subtraction not possible"
    for i in range(len(A)):
        for j in range(len(A[0])):
            A[i][j] = A[i][j] - B[i][j]
    return A
e10ca0e218d7995c0052928b4be96c2bae8959e7
704,842
from typing import OrderedDict


def order_keys(order):
    """
    Order keys for JSON readability when not using json_log=True
    """
    def processor(logger, method_name, event_dict):
        if not isinstance(event_dict, OrderedDict):
            return event_dict
        for key in reversed(order):
            if key in event_dict:
                event_dict.move_to_end(key, last=False)
        return event_dict
    return processor
b3ddc250dc6a7e76b8d980ab81fbf4a9de3d6268
704,843
import os


def get_backend():
    """
    Returns the currently used backend. Default is tensorflow unless the
    VXM_BACKEND environment variable is set to 'pytorch'.
    """
    return 'pytorch' if os.environ.get('VXM_BACKEND') == 'pytorch' else 'tensorflow'
ae93dcf95c5712189d603a9a12f32298c32937b5
704,844
def mpls_label_group_id(sub_type, label):
    """
    MPLS Label Group Id

    sub_type:
        - 1: L2 VPN Label
        - 2: L3 VPN Label
        - 3: Tunnel Label 1
        - 4: Tunnel Label 2
        - 5: Swap Label
    """
    return 0x90000000 + ((sub_type << 24) & 0x0f000000) + (label & 0x00ffffff)
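# A minimal usage sketch with assumed example values: a Swap Label
# (sub_type 5) group id for MPLS label 16 packs the sub_type into bits
# 24-27 and the label into the low 24 bits.
assert mpls_label_group_id(5, 16) == 0x95000010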
f0235d1cd8baaf601baf0db43b81417d3d5823ac
704,845
import logging


def compare_results(out_dict, known_problems_dict, compare_warnings):
    """Compare the number of problems and warnings found with the allowed number"""
    ret = 0
    for key in known_problems_dict.keys():
        try:
            if out_dict[key]['problems'] > known_problems_dict[key]['problems']:
                logging.info("More problems found than expected %d > %d",
                             out_dict[key]['problems'],
                             known_problems_dict[key]['problems'])
                ret = 1
            if compare_warnings and (out_dict[key]['warnings'] > known_problems_dict[key]['warnings']):
                logging.info("More warnings found than expected %d > %d",
                             out_dict[key]['warnings'],
                             known_problems_dict[key]['warnings'])
                ret = 1
        except KeyError:
            logging.info("Expected key %s not found in dictionary %s %s",
                         key, str(known_problems_dict), str(out_dict))
            ret = 1
    logging.info("Results: " + str(out_dict))
    return ret
96cde5d5202d62a7cf135eb6af9b84bee64e22aa
704,846
import torch


def get_spin_interp(zeta: torch.Tensor) -> torch.Tensor:
    """Compute spin interpolation function from fractional polarization `zeta`."""
    exponent = 4.0 / 3
    scale = 1.0 / (2.0 ** exponent - 2.0)
    return ((1.0 + zeta) ** exponent + (1.0 - zeta) ** exponent - 2.0) * scale
b1abced09aead7394be773d93d59a621cda98d14
704,847
def word_show(vol, guess, store):
    """
    param vol: str, the word from def random_word
    param guess: str, the letter user guessed
    param store: str, the string showing correct letters user guessed
    return: str, answer
    """
    answer = ''
    if guess == '':
        for i in vol:
            answer += '-'
    else:
        for j in range(len(vol)):
            if guess == vol[j]:
                answer += guess
            else:
                answer += store[j]
    return answer
65178dda52c61abbae682878dcf2439f20e51b5f
704,848
def _gradual_sequence(start, end, multiplier=3):
    """Custom nodes number generator

    The function gives the list of exponentially increasing/decreasing
    integers from both 'start' and 'end' params, which can be later used as
    the number of nodes in each layer.

    _gradual_sequence(10, 7000, multiplier=5) gives [50, 250, 1250, 6250],
    _gradual_sequence(6000, 10, multiplier=5) gives [1250, 250, 50]
    as a return sequence.

    Args:
        start: lower limit (exclusive)
        end: upper limit

    Returns:
        num_nodes_list: list of integers
        or: reversed(num_nodes_list)
    """
    mode = 'incremental'
    if end < start:
        mode = 'decremental'
        start, end = end, start
    num_nodes_list = [start * (multiplier ** x) for x in range(10)
                      if start * (multiplier ** x) < end]
    if mode == 'incremental':
        return num_nodes_list
    else:
        return reversed(num_nodes_list)
8b42931600cb14b84621f6619ef695f1adee641c
704,849
import time


def timeit(func):
    """calculate time for a function to complete"""
    def wrapper(*args, **kwargs):
        start = time.time()
        output = func(*args, **kwargs)
        end = time.time()
        print('function {0} took {1:0.3f} s'.format(
            func.__name__, (end - start) * 1))
        return output
    return wrapper
13a86c9475ce547a7b5e7e54ad7373f833920b41
704,850
def removeDuplicates(self, nums):
    """
    :type nums: List[int]
    :rtype: int
    """
    if len(nums) == 0:
        return 0
    j = 0
    len_n = len(nums)
    for i in range(len_n):
        if nums[j] != nums[i]:
            nums[j + 1] = nums[i]
            j += 1
    return j + 1
3020be29ad6499dfcb1dcfae6a09b91a11ccfc38
704,851
def string2token(t, nl, nt):
    """
    This function takes a string and returns a token. A token is a tuple
    where the first element specifies the type of the data stored in the
    second element.

    In this case the data types are limited to numbers, either integer, real
    or complex, and strings. The types are denoted as follows:

        i - integer
        f - float/real
        c - complex
        s - string

    For navigational purposes two more elements are added to identify the
    line number (nl) the token was on, and the token number (nt) within the
    line.
    """
    try:
        i_a = int(t)
        #
        # Toldiff should recognise that -0 and 0 are the same, however, in
        # a text based comparison that is not automatic so we have to force
        # this.
        #
        if i_a == 0:
            i_a = 0
        token = ("i", i_a, nl, nt)
    except ValueError:
        #
        # In Fortran double precision constants are often printed with a
        # "D" for the exponent rather than an "E", i.e. 1.0E+01 might be
        # printed as 1.0D+01 in Fortran. Python is not aware of this
        # convention so we need to replace any potential "D"-s to obtain
        # valid floating values.
        #
        z = t.replace("d", "e")
        z = z.replace("D", "e")
        try:
            i_f = float(z)
            #
            # Toldiff should recognise that -0.0 and 0.0 are the same,
            # however, in a text based comparison that is not automatic so
            # we have to force this.
            #
            if i_f == 0.0:
                i_f = 0.0
            token = ("f", i_f, nl, nt)
        except ValueError:
            #
            # The handling of complex numbers is unlikely to work in
            # practice as in most cases complex numbers are printed as
            # (1.0,2.0) rather than 1.0+2.0j. Therefore it is impossible to
            # reliably distinguish between a complex number and a list of 2
            # real numbers.
            #
            try:
                i_c = complex(z)
                #
                # Toldiff should recognise that x-0.0j and x+0.0j and that
                # -0.0+y*j and 0.0+y*j are the same, however, in a text
                # based comparison that is not automatic so we have to
                # force this.
                #
                if i_c.real == 0.0:
                    i_c = complex(0.0, i_c.imag)
                if i_c.imag == 0.0:
                    i_c = complex(i_c.real, 0.0)
                token = ("c", i_c, nl, nt)
            except ValueError:
                token = ("s", t, nl, nt)
    return token
23fd5da01a49076b1fcf474fbe1047329ad7471a
704,852
def get_div(integer):
    """
    Return list of divisors of integer.

    :param integer: int
    :return: list
    """
    divisors = [num for num in range(2, int(integer ** 0.5) + 1)
                if integer % num == 0]
    rem_divisors = [int(integer / num) for num in divisors]
    divisors += rem_divisors
    divisors.append(integer)
    res = list(set(divisors))  # remove duplicates
    res.sort()
    return res
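# A minimal usage sketch; note that 1 is not included by this implementation.
assert get_div(12) == [2, 3, 4, 6, 12]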
4c40a2b2da1d9681c1d7ca69a53975dd27c7bdb8
704,853
def ifuse(inputs):
    """Fuse iterators"""
    value, extent = 0, 1
    for i, ext in inputs:
        value = value * ext + i
        extent = extent * ext
    return (value, extent)
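# A minimal usage sketch with assumed loop indices: fusing index 1 of a
# 3-long axis with index 2 of a 4-long axis gives flat index 1*4 + 2 = 6
# over a fused extent of 3*4 = 12.
assert ifuse([(1, 3), (2, 4)]) == (6, 12)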
42c65ec62e637b668125ed27aad517d3301a7aff
704,854
from typing import get_origin


def is_dict_type(tp):
    """Return True if tp is a Dict"""
    return (
        get_origin(tp) is dict
        and getattr(tp, '_name', None) == 'Dict'
    )
3b9992b7b131e936472d4d0e2994ac476f0d0f76
704,855
def sum_up_validation_dataset(dataset, batch_size, repeat=True, number_of_repetitions=0):
    """Define how the validation dataset is supposed to behave during training.

    This function is applied to the validation dataset just before the actual
    training process. The characteristics defined here address how images are
    picked and how large the batch size is.

    Args:
        dataset (tensorflow dataset): The dataset to which the functions are
            applied.
        batch_size (int): Defines the number of images per validation step.
        repeat (boolean): If set to false the validation data is only
            considered once. If set to true, the dataset is either considered
            endlessly or number_of_repetitions times.
        number_of_repetitions (int): Defines how often the validation data is
            considered.

    Returns:
        The tensorflow dataset with the applied changes described above.
    """
    if repeat:
        if number_of_repetitions > 0:
            dataset = dataset.batch(batch_size).repeat(number_of_repetitions)
        else:
            dataset = dataset.batch(batch_size).repeat()
    else:
        dataset = dataset.batch(batch_size)
    return dataset
9bab85eba802d5198bfd39bc42bd2fae5209d356
704,856
import webbrowser


def getbrowser():
    """ Get the name of the browser currently being used """
    # Try to find the browser
    try:
        # Get the browser controller (the original fell through here and
        # implicitly returned None even on success)
        return webbrowser.get(using=None)
    # Catch an error
    except RuntimeError:
        # Return nothing
        return None
cc74f7db8cf82b32516a0f3b462ba91b7c48b21b
704,857
def _is_large_prime(num):
    """Inefficient primality test, but we can get away with this simple
    implementation because we don't expect users to be running
    print_fizzbuzz(n) for Fib(n) > 514229"""
    # Also reject multiples of 3: the 6k+-1 trial division below never tests
    # 3, so the original check on 2 and 5 alone let 9, 21, ... through.
    if not num % 2 or not num % 3 or not num % 5:
        return False
    test = 5
    while test * test <= num:
        if not num % test or not num % (test + 2):
            return False
        test += 6
    return True
090b641872d8d25d55e8f32296e3893f59518308
704,858
def cleanup_code(content: str):
    """Automatically removes code blocks from the code."""
    # remove ```py\n```
    if content.startswith('```') and content.endswith('```'):
        return '\n'.join(content.split('\n')[1:-1])
    # remove `foo`
    return content.strip('` \n')
a026668f01e1641618c5b25b06396516410dbe1e
704,859
def ext_s(variable, value, substitution):
    """ext_s is a helper function for eq, without checking for duplicates or
    contradiction it adds a variable/value pair to the given substitution.
    `unify` deals with all of the related verification.

    @param variable: A LogicVariable
    @param value: A value that can either be a LogicVariable or literal.
    @param substitution: A set of tuples indicating equality for
        LogicVariables.
    @return: A new substitution with variable and value set as being equal.
    """
    return substitution | {(variable, value)}
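# A minimal usage sketch; the string 'x' stands in for a LogicVariable here.
assert ext_s('x', 5, frozenset()) == {('x', 5)}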
bced042fc8ea5882d4dc901e3b7df94c9b0d0893
704,860
import re


def _include_matcher(keyword="#include", delim="<>"):
    """Match an include statement and return a (keyword, file, extra)
    triple, or a tuple of None values if there isn't a match."""
    rex = re.compile(r'^(%s)\s*%s(.*)%s(.*)$' % (keyword, delim[0], delim[1]))

    def matcher(context, line):
        m = rex.match(line)
        return m.groups() if m else (None, ) * 3

    return matcher
b5f57a8f007870952810a591bb8e15c86af467b1
704,861
import operator


def lcs(l1, l2, eq=operator.eq):
    """Finds the longest common subsequence of l1 and l2.

    Returns a list of common parts and a list of differences.

    >>> lcs([1, 2, 3], [2])
    ([2], [1, 3])
    >>> lcs([1, 2, 3, 3, 4], [2, 3, 4, 5])
    ([2, 3, 4], [1, 3, 5])
    >>> lcs('banana', 'baraban')
    (['b', 'a', 'a', 'n'], ['a', 'r', 'b', 'n', 'a'])
    >>> lcs('abraban', 'banana')
    (['b', 'a', 'a', 'n'], ['a', 'r', 'n', 'b', 'a'])
    >>> lcs([1, 2, 3], [4, 5])
    ([], [4, 5, 1, 2, 3])
    >>> lcs([4, 5], [1, 2, 3])
    ([], [1, 2, 3, 4, 5])
    """
    prefs_len = [
        [0] * (len(l2) + 1)
        for _ in range(len(l1) + 1)
    ]
    for i in range(1, len(l1) + 1):
        for j in range(1, len(l2) + 1):
            if eq(l1[i - 1], l2[j - 1]):
                prefs_len[i][j] = prefs_len[i - 1][j - 1] + 1
            else:
                prefs_len[i][j] = max(prefs_len[i - 1][j], prefs_len[i][j - 1])
    common = []
    diff = []
    i, j = len(l1), len(l2)
    while i and j:
        assert i >= 0
        assert j >= 0
        if eq(l1[i - 1], l2[j - 1]):
            common.append(l1[i - 1])
            i -= 1
            j -= 1
        elif prefs_len[i - 1][j] >= prefs_len[i][j - 1]:
            i -= 1
            diff.append(l1[i])
        else:
            j -= 1
            diff.append(l2[j])
    diff.extend(reversed(l1[:i]))
    diff.extend(reversed(l2[:j]))
    return common[::-1], diff[::-1]
4b5d3cb9911a6834c006e78f7b40061695c464e2
704,863
def calc_sparsity(optimizer, total_params, total_quant_params):
    """
    Returns the sparsity of the overall network and the sparsity of quantized
    layers only.

    Parameters:
    -----------
    optimizer:
        An optimizer containing quantized model layers in
        param_groups[1]['params'] and non-quantized layers, such as
        BatchNorm, Bias, etc., in param_groups[0]['params'].
    total_params:
        Total number of parameters.
    total_quant_params:
        Number of quantized parameters.

    Returns:
    --------
    sparsity_total:
        Sparsity of the overall network.
    sparsity_quant:
        Sparsity of quantized layers of the network.
    """
    nonzero_elements_quant = 0
    for layer in optimizer.param_groups[1]['params']:
        nonzero_elements_quant += layer[layer != 0].numel()
    nonzero_elements_total = 0
    for layer in optimizer.param_groups[0]['params']:
        nonzero_elements_total += layer[layer != 0].numel()
    nonzero_elements_total += nonzero_elements_quant
    sparsity_total = (total_params - nonzero_elements_total) / total_params
    sparsity_quant = (total_quant_params - nonzero_elements_quant) / total_quant_params
    return sparsity_total, sparsity_quant
92ee924239ee8d7ac97aebba2958671043aa2d89
704,864
def swapKeys(d, keySwapDict):
    """ Swap keys in dictionary according to keySwap dictionary """
    dNew = {}
    for key, keyNew in keySwapDict.items():  # was .iteritems(), Python 2 only
        if key in d:
            dNew[keyNew] = d[key]
    for key in d:
        if key not in keySwapDict:
            dNew[key] = d[key]
    return dNew
0d8917e224574ee0bf682fed10d367f3a5d2bc2f
704,865
import torch


def getLayers(model):
    """
    get each layer's name and its module

    :param model:
    :return: each layer's name and its module
    """
    layers = {}
    root = ''

    def unfoldLayer(model, root):
        """
        unfold each layer

        :param model: the given model or a single layer
        :param root: root name
        :return:
        """
        # get all layers of the model
        layer_list = list(model.named_children())
        for item in layer_list:
            name = item[0]
            module = item[1]
            layer_type = str(module).split('(')[0]
            sublayer_num = len(list(module.named_children()))
            # if current layer contains sublayers, add current layer name on its sublayers
            if sublayer_num > 0:
                name = root + ":" + name if root else name
            else:
                name = root + ":" + name + '(' + layer_type + ')' if root else name
            layers[name] = module
            # if current layer contains sublayers, unfold them
            if isinstance(module, torch.nn.Module):
                unfoldLayer(module, root=name)

    unfoldLayer(model, root)
    return layers
e1120460b35fa49fe8ad43cc9ce606c1d217a584
704,866
def buscaVizinhos(matrizCapacidades):
    """Find the neighbours of each vertex"""
    vizinhos = {}
    for v in range(len(matrizCapacidades)):
        vizinhos[v] = []
    for v, fluxos in enumerate(matrizCapacidades):
        for vizinho, fluxo in enumerate(fluxos):
            if fluxo > 0:
                vizinhos[v].append(vizinho)
                vizinhos[vizinho].append(v)
    return vizinhos
1e9ace4be94d80ae2637689b3d25ee1116714888
704,867
import json
import os
import shutil


def _check_json(out_file, status, hold_file):
    """Function:  _check_json

    Description:  Private function for file_check function.

    Arguments:
        (input) out_file -> Path and file name of output file.
        (input) status -> Status of check.
        (input) hold_file -> Name of file if file check fail.
        (output) status -> True|False - Status of check.
    """
    try:
        _ = json.load(open(out_file))
    except ValueError:
        status = False
        print("\t\tError: %s is not in JSON format" % (out_file))
        if not os.path.isfile(hold_file):
            shutil.copy2(out_file, hold_file)
    return status
616ef16200e07975cdaedbebfa3fcf21746c367b
704,868
def _prefAdj(coupling, leg):
    """Prefactor for the creation of an adjoint

    Only implemented for regular three-legged tensors with an (in, in, out)
    flow and their adjoints at the moment.
    """
    if len(coupling) != 1:
        raise NotImplementedError("Only for three-legged tensors")

    flow = tuple(c[1] for c in coupling[0])
    if flow != (True, True, False) and flow != (True, False, False):
        raise NotImplementedError("Only (in, in, out) and its adjoint allowed")

    if flow == (True, False, False):
        def loop(x):
            return x[::-1]
    else:
        def loop(x):
            return x

    fid = [c[0] for c in loop(coupling[0])].index(leg) if leg else None
    fpref = {
        0: lambda key: 1. if loop(key[0])[1] % 2 == 0 else -1.,
        1: lambda key: 1. if loop(key[0])[0] % 2 == 0 else -1.,
        2: lambda key: 1.,
        None: lambda key: 1. if loop(key[0])[2] % 2 == 0 else -1.,
    }[fid]

    def su2pref(key):
        sign = (1, 1, -1)
        return 1. if sum(s * k for s, k in zip(sign, loop(key[0]))) % 4 == 0 else -1.

    return {'fermionic': fpref, 'SU(2)': su2pref}
e2889badba0cef27c4ce8c51ed14bda71524c3ec
704,869
import socket
import logging


def get_hostname():
    """
    :return: safely return the hostname
    """
    hostname = "<Host Undetermined>"
    try:
        hostname = socket.gethostname()
    except Exception as e:
        logging.error(f"Could not get hostname.\nException: {e}")
    return hostname
d5420b275c336b1295b16216a473557a24d54a61
704,871
def dict_get(d, key, default=None):
    """:yaql:get

    Returns value of a dictionary by given key or default if there is no
    such key.

    :signature: dict.get(key, default => null)
    :receiverArg dict: input dictionary
    :argType dict: dictionary
    :arg key: key
    :argType key: keyword
    :arg default: default value to be returned if key is missing in
        dictionary. null by default
    :argType default: any
    :returnType: any (appropriate value type)

    .. code::

        yaql> {"a" => 1, "b" => 2}.get("c")
        null
        yaql> {"a" => 1, "b" => 2}.get("c", 3)
        3
    """
    return d.get(key, default)
5fb6a71e507f62eb530215385c97c56a75765df7
704,872
import subprocess


def find_telomeres(seq, telomere="ACACCCTA", minlength=24):
    """Find telomere sequences with NCRF in a list of sequences

    Assumes that NCRF is in the PATH.

    Parameters
    ----------
    seq : str
        Sequence to be scanned
    telomere : str
        Sequence of the telomere repeat. Default is ACACCCTA, corresponding
        to Blepharisma and Stentor telomere.
    minlength : int
        Minimum length of consecutive telomeres to detect (bp)
    """
    ncrf_out = subprocess.run(
        ["NCRF", f"telomere:{telomere}", f"--minlength={str(minlength)}"],
        capture_output=True, input=str.encode(seq))
    return ncrf_out
3ae2e190a86b39ca21f59dba13aa98d6eb8247a5
704,874
import torch


def train_model(bert_model, dataloader_train, optimizer, scheduler, device):
    """The architecture's training routine."""
    bert_model.train()
    loss_train_total = 0

    for batch_idx, batch in enumerate(dataloader_train):
        # set gradient to 0
        bert_model.zero_grad()
        batch = tuple(b.to(device) for b in batch)
        inputs = {
            "input_ids": batch[0],
            "token_type_ids": batch[1],
            "attention_mask": batch[2],
            "labels": batch[3],
        }
        loss, _ = bert_model(
            inputs["input_ids"],
            token_type_ids=inputs["token_type_ids"],
            attention_mask=inputs["attention_mask"],
            labels=inputs["labels"],
            return_dict=False,
        )
        # Compute train loss
        loss_train_total += loss.item()
        loss.backward()
        # gradient clipping
        torch.nn.utils.clip_grad_norm_(bert_model.parameters(), 1.0)
        optimizer.step()
        scheduler.step()

    # torch.save(bert_model.state_dict(), f"models/ BERT_ft_epoch{epoch}.model")
    loss_train_avg = loss_train_total / len(dataloader_train)
    return loss_train_avg
fafa12c999d3c2d9716298b1ae64bd2f53dd9d09
704,876
def get_date_row(repositories):
    """Generate row with dates."""
    row = ["Date"]
    for repository in repositories:
        row.append("Sources")
        row.append("Issues")
        row.append("Correct")
    # one more for summary
    row.append("Sources")
    row.append("Issues")
    row.append("Correct")
    return row
460b5d47e631dac4e918965eca9c11d4f6085bb1
704,877
import pytz


def as_utc(time):
    """Convert a time to a UTC time."""
    return time.astimezone(pytz.utc)
716858e88daa43b61f5cedae72e74dafcf67d423
704,878
import itertools


def encode(data):
    """Encode data using LZ78 compression.

    Args:
        data: A str to encode.

    Returns:
        A list of two-element tuples of int and str.
    """
    dictionary = {}
    index = itertools.count(1)
    word = ''
    result = []
    for character in data:
        new_word = word + character
        if new_word not in dictionary:
            result.append((dictionary.get(word, 0), character))
            dictionary[new_word] = index.__next__()
            word = ''
        else:
            word = new_word
    # Corner-case: without this resulting list will be incomplete
    if word:
        result.append((dictionary.get(word[:-1], 0), word[-1:]))
    return result
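# A minimal usage sketch: "aaab" emits a literal 'a', then a back-reference
# to dictionary entry 1 ("a") extended by 'a', then a literal 'b'.
assert encode("aaab") == [(0, 'a'), (1, 'a'), (0, 'b')]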
fcfe0b294eed92812380a60d1ec1b642084c8dfe
704,879
def replace_num(s):
    """Removes the digits from tweets"""
    for i in ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]:
        s = s.replace(i, "")
    return s
8d46f3e8d44cfc80f1efbb685b4a952c253763bd
704,880
def build_response(session_attributes, speechlet_response):
    """builds response"""
    return {
        'version': '1.0',
        'sessionAttributes': session_attributes,
        'response': speechlet_response
    }
96bc754e1a58300b2861851be678aa0e984845a8
704,881
def insertion_sort(L):
    """Implementation of insertion sort."""
    n = len(L)
    if n < 2:
        return L
    for i in range(1, n):
        tmp = L[i]
        j = i
        while j > 0 and tmp < L[j - 1]:
            L[j] = L[j - 1]
            j -= 1
        L[j] = tmp
    return L  # original returned None here, inconsistent with the n < 2 branch
ca7cbb5c676173ad10ce98d8b9e579a65afad0fb
704,882
def _do_nothing(string):
    """Makes the ConfigParser case sensitive."""
    return string
4011cbe5b00bef5fe9fc13420bbd988729641676
704,883
def is_symmetric(mat):
    """Return whether a sy.Matrix is symmetric."""
    n, nc = mat.shape
    return nc == n and all(mat[j, i] == mat[i, j]
                           for i in range(n) for j in range(i + 1, n))
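# A minimal usage sketch; assumes SymPy, matching the sy.Matrix mentioned in
# the docstring.
import sympy as sy
assert is_symmetric(sy.Matrix([[1, 2], [2, 3]]))
assert not is_symmetric(sy.Matrix([[1, 2], [0, 3]]))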
e23cb03ec06f16584a99c82d66770491a6635ef5
704,884
def build_dict(*param_dicts, **param_dict):
    """
    Create a merged dictionary from the supplied dictionaries and keyword
    parameters.
    """
    merged_param_dict = param_dict.copy()
    for d in param_dicts:
        if d is not None:
            # log.info("param_dicts %r" % (d,))
            merged_param_dict.update(d)
    return merged_param_dict
1f77e1ca81051913ed32ea5bfdb684cf468e27af
704,885
def Mode(hist):
    """Returns the value with the highest frequency.

    hist: Hist object

    returns: value from Hist
    """
    p, x = max([(p, x) for x, p in hist.Items()])
    return x
2db7d658ad58a80041c3f450104aada006b11eaf
704,886
def get_img_space(wsp, img):
    """
    Find out what image space an image is in

    Note that this only compares the voxel->world transformation matrix to
    the reference image for each space. It is quite possible for two images
    to be in the same space but not be registered to one another. In this
    case, the returned space may not be accurate when determining whether a
    registration is required.

    :param wsp: Workspace object
    :param img: Image

    :return: Name of image space for ``img``, e.g. ``native``, ``struc``
    """
    img_space = None
    for space in ('native', 'calib', 'struc', 'std', 'custom'):
        ref = getattr(wsp.reg, "%sref" % space)
        if ref is not None and img.sameSpace(ref):
            img_space = space
            break
    if img_space is None:
        raise RuntimeError("Could not determine space for image: %s" % str(img))
    return img_space
b455dd6300cf13cbba5d8e2d44685e06d8fb4cad
704,887
import os


def get_dir(algorithm, mode):
    """
    Used to determine the plotting folder

    Parameters
    ----------
    algorithm: String
        The name of the current ANC algorithm running
    mode: String
        The MODE (prerecorded or anc) the server is currently running

    Returns
    -------
    path : Path
        The path of the plots directory of diminish

    Raises
    ------
    None
    """
    base_dir = os.getcwd()
    results_dir = os.path.join(base_dir, f'plots/{algorithm}/{mode}/')
    if not os.path.isdir(results_dir):
        os.makedirs(results_dir)
    return results_dir
350e47f729bddbc7d85e306b6b6af5d6a14222ea
704,888
def str_to_bool(string):
    """Used as a type in argparse so that we get back a proper bool instead
    of always True
    """
    return string.lower() in ("y", "yes", "1", "true")
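# A minimal usage sketch; the --verbose flag is an assumed example of wiring
# this converter into argparse.
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--verbose", type=str_to_bool, default=False)
assert parser.parse_args(["--verbose", "False"]).verbose is False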
ab54e7cff5721f91f78c90498c16d68f2760ee11
704,889
def new(nbits, prefix=b"", suffix=b"", initial_value=1, little_endian=False,
        allow_wraparound=False):
    """Create a stateful counter block function suitable for CTR encryption
    modes.

    Each call to the function returns the next counter block. Each counter
    block is made up by three parts:

        +------+--------------+-------+
        |prefix| counter value|postfix|
        +------+--------------+-------+

    The counter value is incremented by 1 at each call.

    Args:
        nbits (integer):
            Length of the desired counter value, in bits. It must be a
            multiple of 8.
        prefix (byte string):
            The constant prefix of the counter block. By default, no prefix
            is used.
        suffix (byte string):
            The constant postfix of the counter block. By default, no suffix
            is used.
        initial_value (integer):
            The initial value of the counter. Default value is 1.
        little_endian (boolean):
            If ``True``, the counter number will be encoded in little endian
            format. If ``False`` (default), in big endian format.
        allow_wraparound (boolean):
            This parameter is ignored.

    Returns:
        An object that can be passed with the :data:`counter` parameter to a
        CTR mode cipher.

    It must hold that *len(prefix) + nbits//8 + len(suffix)* matches the
    block size of the underlying block cipher.
    """
    if (nbits % 8) != 0:
        raise ValueError("'nbits' must be a multiple of 8")
    # Ignore wraparound
    return {"counter_len": nbits // 8,
            "prefix": prefix,
            "suffix": suffix,
            "initial_value": initial_value,
            "little_endian": little_endian
            }
a0cdadcf6eb81ad323c205e08db97d094326f513
704,890