Dataset columns: content (string, 35 to 416k chars) · sha1 (string, 40 chars) · id (int64, 0 to 710k)
def PeerDownHasBgpNotification(reason):
    """Determine whether or not a BMP Peer Down message has a BGP notification.

    Args:
      reason: the Peer Down reason code (from the draft)

    Returns:
      True if there will be a BGP Notification, False if not
    """
    return reason == 1 or reason == 3
8ee214798f6766916e8784dd907eeb45ff6620db
704,670
def set_purpose(slack_client, channel, purpose):
    """Set the purpose of a given channel."""
    response = slack_client.api_call("channels.setPurpose",
                                     purpose=purpose, channel=channel)
    return response
786a495b55300b955e2f7ec525117be75b251a07
704,671
import os


def generate_aes_key(size=256):
    """
    Generates an AES key of the specified size

    :param size: key size in bits, default 256
    :return: generated key bytes
    """
    return os.urandom(int(size / 8))
604b626ae0996499c2d855ede91f70ded8f585cc
704,672
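A quick usage sketch for the key generator above; the length checks are illustrative additions, not part of the original:

key = generate_aes_key()         # 256-bit key by default
assert len(key) == 32            # 256 bits == 32 bytes
key_128 = generate_aes_key(128)  # 16 bytes for AES-128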
import json


def decode_stderr_json(stderr):
    """return a list of decoded json messages in stderr"""
    # - check for blank input
    if not stderr:
        # - nothing to do
        return list()
    # - split the input (based on newlines) into list of json strings
    output = list()
    for line in stderr.split('\n'):
        if not line:
            # - skip blank lines: no valid json or message to decode
            continue
        json_message = list()
        try:
            json_message = json.loads(line)
        except ValueError:
            # - if json cannot be decoded, just log as ERROR prefixed by '!'
            json_message = {'level': 'ERROR', 'message': '!' + line}
        output.append(json_message)
    return output
d527730d8d9a77a1ec434ee6203c4e08433306c9
704,673
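A minimal usage sketch for decode_stderr_json, assuming stderr mixes JSON and plain-text lines:

stderr = '{"level": "INFO", "message": "ok"}\nnot json at all\n'
for msg in decode_stderr_json(stderr):
    print(msg['level'], msg['message'])
# INFO ok
# ERROR !not json at all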
def get_validate_result_form(tel_num, validate_code):
    """
    Assemble form for get_validate_result
    :param tel_num: Tel number
    :param validate_code: Validate code from captcha image
    :return: Params in dict
    """
    post_data_dict = dict()
    post_data_dict['source'] = 'wsyyt'
    post_data_dict['telno'] = tel_num
    post_data_dict['validcode'] = validate_code
    return post_data_dict
6340c97522a097c0cf96170e08466fb795e16dc3
704,674
import os


def getFileInfoFromXML(thisfile):
    """Get the PFN from the XML"""
    pfn = thisfile.getElementsByTagName("pfn")[0].getAttribute("name")
    # lfn will not be present in XML any longer, get it from the PFN
    # - possible problem with LFN file name extensions
    # lfn = thisfile.getElementsByTagName("lfn")[0].getAttribute("name")
    lfn = os.path.basename(pfn)
    guid = thisfile.getAttribute("ID")
    return lfn, pfn, guid
788080388a4c7984f8646a944eefa04a7ce22536
704,675
def _serialize_noise_model(config):
    """Traverse the dictionary looking for noise_model keys and apply
    a transformation so it can be serialized.

    Args:
        config (dict): The dictionary to traverse

    Returns:
        dict: The transformed dictionary
    """
    for k, v in config.items():
        if isinstance(config[k], dict):
            _serialize_noise_model(config[k])
        else:
            if k == 'noise_model':
                try:
                    config[k] = v.to_dict(serializable=True)
                except AttributeError:
                    # if .to_dict() fails it is probably because the
                    # noise_model has already been transformed elsewhere
                    pass
    return config
f3453e174d5ba858b9eec678e7bc1574f74d50eb
704,676
import os


def sequential_name(folder, basename):
    """
    Given a proposed name for a file (string 'basename') to be saved in a
    folder (identified by its path in string 'folder'), produces a new name
    to use that avoids overwriting other files - as long as their names were
    made with this function, too.
    """
    if not os.access(folder, os.F_OK):
        return '{:s}/{:s}'.format(folder, basename)
    else:
        existing_files = os.listdir(folder)
        matches = sum([basename in x for x in existing_files])
        if matches == 0:
            return '{:s}/{:s}'.format(folder, basename)
        else:
            return '{:s}/{:s} ({:d})'.format(folder, basename, matches)
1a1afd78371da050ef6e44aa909d8c800f82ac21
704,677
def metric_max_over_ground_truths(metric_fn, predictions, ground_truths):
    """Take the average best score against all ground truth answers.

    This is a bit different than SQuAD in that there are multiple answers
    **and** predictions that we average over. For some situations (e.g.,
    *top k* beams or multiple human references) we might want to calculate
    the average performance. In most cases, however, predictions will be a
    list of length 1.

    Args:
        metric_fn: Callable on (prediction, ground_truth).
        predictions: List of whitespace separated prediction tokens.
        ground_truths: List of whitespace separated answer tokens.

    Returns:
        max_score: Max output of metric_fn.
    """
    all_metrics = []
    for prediction in predictions:
        scores_for_ground_truths = []
        for ground_truth in ground_truths:
            score = metric_fn(prediction, ground_truth)
            scores_for_ground_truths.append(score)
        all_metrics.append(max(scores_for_ground_truths))
    return sum(all_metrics) / len(all_metrics)
7c78fc1cca29bc9784a4e4687d794c1f2b6872c9
704,678
def attSummaryDict(request, reqs, flist):
    """Return a dictionary summarizing the field values for the chosen
    most interesting fields"""
    sumd = {}
    for req in reqs:
        for f in flist:
            if f in req and req[f]:
                if f not in sumd:
                    sumd[f] = {}
                if req[f] not in sumd[f]:
                    sumd[f][req[f]] = 0
                sumd[f][req[f]] += 1
    # convert to ordered lists
    suml = []
    for f in sumd:
        itemd = {}
        itemd['field'] = f
        iteml = []
        # sorted() instead of .sort(): dict.keys() is a view in Python 3
        for ky in sorted(sumd[f].keys()):
            iteml.append({'kname': ky, 'kvalue': sumd[f][ky]})
        if 'sortby' in request.GET and request.GET['sortby'] == 'count':
            iteml = sorted(iteml, key=lambda x: x['kvalue'], reverse=True)
        else:
            iteml = sorted(iteml, key=lambda x: str(x['kname']).lower())
        itemd['list'] = iteml
        suml.append(itemd)
    suml = sorted(suml, key=lambda x: x['field'])
    return suml
bafbbe51555cb46c664d33ea31a0e36c56152fa9
704,679
def ext_bottom_up_cut_rod(price, length):
    """bottom up implementation of cut rod memoized algorithm"""
    incomelst = [float("-Inf") for _ in range(length + 1)]
    cutlst = [0 for _ in range(length + 1)]
    # set zero income for zero length
    incomelst[0] = 0
    for j in range(1, length + 1):
        income = float("-Inf")
        for i in range(j):
            if income < price[i] + incomelst[j - i - 1]:
                income = price[i] + incomelst[j - i - 1]
                cutlst[j] = i + 1
        # set income for current length
        incomelst[j] = income
    # income for whole rod
    return incomelst, cutlst
7dd8c43afa9f71793d372b474963ff84d2ce607f
704,680
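A worked example of the cut-rod routine above, using the classic CLRS price table; the cut-reconstruction loop is an illustrative addition:

prices = [1, 5, 8, 9, 10, 17, 17, 20, 24, 30]  # prices[i] = price of rod of length i+1
incomes, cuts = ext_bottom_up_cut_rod(prices, 7)
print(incomes[7])  # 18: best revenue for a rod of length 7
n = 7
while n > 0:       # reconstruct the optimal cuts
    print(cuts[n])  # prints 1, then 6
    n -= cuts[n]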
def _build_config_dict(cfg_node):
    """
    Builds a config dict from the given etcd node, which should point at a
    config directory.
    """
    config_dict = {}
    for child in cfg_node.children:
        key = child.key.rsplit("/").pop()
        value = str(child.value)
        config_dict[key] = value
    return config_dict
567fca19a6e1890c881170200ba44fc262148948
704,681
import time


def stamp_to_ymd(timestamp):
    """
    Caller sends a timestamp in seconds of epoch. Return a string for the
    year, month, day of that time as 'YYYYMMDD', as used by url requests,
    as in http://<fitsstore_server>/qaforgui/20130616

    parameters: <float>, seconds of epoch.
    return:     <string>, YYYYMMDD of passed time.
    """
    return time.strftime("%Y%m%d", time.localtime(timestamp))
2928e93a48f1a5c3abdddcb6285bed7b0cebb369
704,682
import os


def exists(b, d, n):
    """Check if the folder specified by the given parameters exists"""
    return os.path.isdir("../Output/B" + str(b) + " D" + str(d) + " N" + str(n))
b94f8bfb38351127e77fa9f19b706906d9805e82
704,683
def not_contains(a, b):
    """Evaluates a does not contain b"""
    return b not in a
a0dc087049c8e93c1acdf0e59e3530a6ff8b54e5
704,684
def create_func_result_identifier(func, params_str, key=None, key_separator="__"):
    """
    Creates a string of the following format:

    If ``key`` is None:     ``<FUNC_NAME><PARAMS_STR>``
    If ``key`` is not None: ``<FUNC_NAME><PARAMS_STR>__key``

    In both cases, ``<FUNC_NAME>`` represents the name of the function object
    ``func`` (via ``func.__name__``) and ``<PARAMS_STR>`` represents a string
    object given by ``params_str`` (e.g., obtained via the method
    ``create_params_str``). In the latter case, ``key`` represents the
    function return identifier (e.g., for a multi-value result, the ``key``
    is the identifier for one such value) and it is separated by a double
    underscore. The double underscore separator can be changed via the
    default parameter ``key_separator``.

    **IMPORTANT NOTE:** This function is rather specific to time series
    characteristic features (such as tsfresh), so it should be used only
    internally.
    """
    return (f"{func.__name__}{params_str}{key_separator}{key}"
            if key is not None
            else f"{func.__name__}{params_str}")
6f3a7a6a8a94629dae7817403d78ef1f970ad5b2
704,685
import csv


def import_town(data_file):
    """
    Reads town raster data from a CSV file.

    Parameters
    ----------
    data_file : str
        Name of CSV raster data file to use for the town.

    Returns
    -------
    town : list
        List (cols) of lists (rows) representing raster data of the town.
    """
    # Read in town data and format it as a list of lists
    with open(data_file, newline="") as f:
        reader = csv.reader(f, quoting=csv.QUOTE_NONNUMERIC)
        town = []
        for row in reader:
            rowlist = []
            for value in row:
                rowlist.append(value)
            town.append(rowlist)
    return town
b7749dfd4d698fddfe610c6a51c8ccc43c375cc2
704,686
def pe44(limit=1500):
    """
    >>> pe44()
    (5482660, 7042750, 1560090, 2166, 1019)
    """
    pents = [i * (3 * i - 1) >> 1 for i in range(1, limit << 1)]
    ps = set(pents)
    for i in range(limit):
        p1 = pents[i]
        for j in range(i + 1, (limit << 1) - 1):
            p2 = pents[j]
            diff = p2 - p1
            if p2 + p1 in ps and diff in ps:
                return (diff, p2, p1, j, i)
    return None
e41f513c518b502de0c47f3a70390f9df01a1868
704,687
def sanitize_markdown(markdown_body):
    """
    Some symbols used in the markdown body break when they go through the
    Markdown -> HTML conversion. This does a global replace on markdown
    strings for these symbols.
    """
    # This is to solve the issue where <s> and <e> are interpreted as HTML tags
    return markdown_body.replace(
        '&lt;', '<').replace(
        '&gt;', '>').replace(
        '\\<s>', '&lt;s&gt;').replace(
        '\\<e>', '&lt;e&gt;')
adf21a9bbea1a95f0f4c0aca8d61ab6d69627074
704,688
def _build_jinja2_expr_tmp(jinja2_exprs):
    """Build a template to evaluate jinja2 expressions."""
    exprs = []
    tmpls = []
    for var, expr in jinja2_exprs.items():
        tmpl = f"{var}: >-\n {{{{ {var} }}}}"
        if tmpl not in tmpls:
            tmpls.append(tmpl)
        if expr.strip() not in exprs:
            exprs.append(expr.strip())
    return "\n".join(exprs + tmpls)
3e5d944345316a40b7b8052f9b13801228607099
704,689
def load_coco_name(path):
    """Load labels from coco.name"""
    coco = {}
    with open(path, 'rt') as file:
        for index, label in enumerate(file):
            coco[index] = label.strip()
    return coco
2da456b7c2879ec5725172280dacbcaaacd86bfc
704,690
import base64
import gzip
import json


def decompress_metadata_string_to_dict(input_string):  # pylint: disable=invalid-name
    """Convert compact string format (dumped, gzipped, base64 encoded) from
    IonQ API metadata back into a dict relevant to building the results
    object on a returned job.

    Parameters:
        input_string (str): compressed string format of metadata dict

    Returns:
        dict: decompressed metadata dict
    """
    if input_string is None:
        return None
    encoded = input_string.encode()
    decoded = base64.b64decode(encoded)
    decompressed = gzip.decompress(decoded)
    return json.loads(decompressed)
c521da786d2a9f617c560916cc5f058b20cb3e21
704,691
import struct
import socket


def inet_atoni(ip):
    """Like inet_aton() but returns an integer."""
    return struct.unpack('>I', socket.inet_aton(ip))[0]
3bd18b7aecf9a5a45033c7873163ee1387cb8a13
704,692
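An illustrative round trip for inet_atoni; socket.inet_ntoa is the stock inverse:

n = inet_atoni('192.168.0.1')
print(n)                                       # 3232235521
print(socket.inet_ntoa(struct.pack('>I', n)))  # 192.168.0.1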
import re


def rep_unicode_in_code(code):
    """
    Replace unicode escapes like '\\u003D' with the corresponding character
    ('=') in the code
    :param code: type str
    :return: type str
    """
    pattern = re.compile(r'(\\u[0-9a-zA-Z]{4})')
    m = pattern.findall(code)
    for item in set(m):
        # item[2:] strips the leading \u
        code = code.replace(item, chr(int(item[2:], 16)))
    return code
70e28ea741f0347190628876b59e27a56a5c0ccf
704,693
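A short usage sketch for the unicode-escape replacement above:

print(rep_unicode_in_code('a \\u003D b'))  # prints: a = b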
def f(spam, eggs):
    """
    :type spam: list of string
    :type eggs: (bool, int, unicode)
    """
    return spam, eggs
7d315898332b099eb1105f77b08bfe69e29c051e
704,694
def print_person(first, last, middle=None):
    """Formats a person's name

    This function returns a person's full name as a single string (despite
    its name, it does not print). It's not too useful

    Args:
        first (str): This person's first name
        last (str): This person's last name
        middle (str): Optional. This person's middle name
    """
    middle = middle or ""
    return '{f} {m} {l}'.format(f=first, m=middle, l=last)
643ce351ec13a076c9fd36af39c97505084f1437
704,695
def _chomp_element(base, index, value):
    """Implementation of perl = and chomp on an array element"""
    if value is None:
        value = ''
    base[index] = value.rstrip("\n")
    return len(value) - len(base[index])
66cfde7c8d8f2c92f0eebb23f717bf50b676ca31
704,696
import time


def generate_nonce():
    """
    Generates a nonce for a signature

    Returns:
        nonce (int): epoch timestamp, offset 100 seconds into the future
    """
    return int(time.time() + 100)
c439fc6598b4f5359d71bde8865afacb6162df19
704,697
import ast


def is_py3(file_path):
    """Check if code is Python3 compatible."""
    # https://stackoverflow.com/a/40886697
    code_data = open(file_path, "rb").read()
    try:
        ast.parse(code_data)
    except SyntaxError:
        return False
    return True
78a48bdcc682108ce4fbe6fffe4a235898beec1c
704,699
import time


def getDate():
    """Get the current local time"""
    return time.localtime()
6f4f127b96ab6f754cc20e76219a54d039938320
704,700
def reverse(x):
    """
    :type x: int
    :rtype: int
    """
    new_str = str(x)
    i = 1
    rev_str = new_str[::-1]
    if rev_str[-1] == "-":
        rev_str = rev_str.strip("-")
        i = -1
    if int(rev_str) >= 2**31:
        return 0
    return int(rev_str) * i
5775fe83f500ac844fa9fc94a4d71fc3bb6f165b
704,701
def part_1(input_data: list[int]) -> int:
    """Count the number of times a depth measurement increases from the
    previous measurement.

    Args:
        input_data (list[int]): depths

    Returns:
        int: number of depth increases
    """
    inc_count = 0
    for i, depth in enumerate(input_data):
        if i != 0 and depth > input_data[i - 1]:
            inc_count += 1
    return inc_count
3ee506aca019f9393c93ced75e430d53b31a9fc2
704,702
def single_varint(data, index=0):
    """
    The single_varint function processes a varint and returns the length of
    that varint.

    :param data: The data containing the varint (maximum of 9 bytes in
        length as that is the maximum size of a varint).
    :param index: The current index within the data.
    :return: varint, the processed varint value, and index which is used to
        identify how long the varint was.
    """
    # If the decimal value is >= 128 -- then the first bit is set and we
    # need to process the next byte.
    if ord(data[index:index + 1]) >= 128:
        # Check if there is a three or more byte varint
        if ord(data[index + 1:index + 2]) >= 128:
            raise ValueError
        varint = (ord(data[index:index + 1]) - 128) * 128 + ord(
            data[index + 1:index + 2])
        index += 2
        return varint, index
    # If the decimal value is < 128 -- then the first bit is not set and
    # this is the only byte of the varint.
    else:
        varint = ord(data[index:index + 1])
        index += 1
        return varint, index
55b052300cc0cf5ac2fd8f7451ac121b408c1313
704,703
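An illustrative call on the varint parser above, using SQLite-style big-endian varints:

val, idx = single_varint(b'\x17')      # one-byte varint
print(val, idx)                        # 23 1
val, idx = single_varint(b'\x81\x47')  # two bytes: (0x81 - 128) * 128 + 0x47
print(val, idx)                        # 199 2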
import glob


def tumor_list(version):
    """
    version: cross validation version and train or val
    """
    path_list = []
    for i in version:
        paths = sorted(glob.glob(f'./data/tumor_-150_150/{i}/label_*/*.npy'))
        path_list.extend(paths)
    return path_list
da390686072613177a4f3f5b483d980640090d1c
704,704
from typing import Tuple


def _color_int_to_rgb(integer: int) -> Tuple[int, int, int]:
    """Convert a 24 bit integer into a RGB color tuple with the value range
    (0-255).

    Parameters
    ----------
    integer : int
        The value that should be converted

    Returns
    -------
    Tuple[int, int, int]:
        The resulting RGB tuple.
    """
    return ((integer >> 16) & 255, (integer >> 8) & 255, integer & 255)
df3eb5ad92d9383b0e6fe5c1603e0caec0df5c45
704,705
def label2binary(y, label):
    """
    Map the given label to +1 and the other labels to -1.

    Parameters:
    -----------
    y : `numpy.ndarray` (nData,)
        The labels of two classes.
    label : `int`
        The label to map to +1.

    Returns:
    --------
    y : `numpy.ndarray` (nData,)
        Maps the given label to +1 and the other label to -1.
    """
    return (2 * (y == label).astype(int)) - 1
5bce8491e9eef3a8c36b784ee0e252c641b24fdf
704,706
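A small numpy example for label2binary:

import numpy as np
y = np.array([0, 1, 2, 1])
print(label2binary(y, 1))  # [-1  1 -1  1]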
def in_suit3(list, list0):
    """
    Test whether two ranges of street numbers have crossing numbers.
    For example: "22-27" in "21-24" returns True
    :param list: string range of numbers, e.g. "22-27"
    :param list0: string range of numbers, e.g. "21-24"
    :return: boolean
    """
    text = list.replace("-", "")
    text0 = list0.replace("-", "")
    if ("-" in list) and ("-" in list0) and text.isdigit() and text0.isdigit():
        list1 = list.split("-")
        x = int(list1[0])
        suit = set()
        suit.add(x)
        while x < int(list1[len(list1) - 1]):
            x += 1
            suit.add(x)
        suit.add(int(list1[len(list1) - 1]))
        list2 = list0.split("-")
        y = int(list2[0])
        suit0 = set()
        suit0.add(y)
        while y < int(list2[len(list2) - 1]):
            y += 1
            suit0.add(y)
        suit0.add(int(list2[len(list2) - 1]))
        temp = [item for item in suit if item in suit0]
        if len(temp) > 0:
            return True
    return False
57409220b93c66ab4b957a05713e5e89b380253a
704,707
def extract_power(eeg, D=3, dt=0.2, start=0):
    """
    extract power values for image

    Parameters
    ----------
    eeg : EEG | dict
        eeg data
    D : int, optional
        epoch duration, by default 3
    dt : float, optional
        time step (seconds), by default 0.2
    start : int, optional
        time to start, by default 0

    Returns
    -------
    baseline_ex_power : ndarray
        baseline power
    seizure_ex_power : ndarray
        seizure power
    """
    assert int(D / dt) * dt == D
    num_steps = int(D / dt)
    seiz = eeg['seizure']['eeg']
    sfreq = seiz.info['sfreq']
    onset = seiz.annotations.onset[0] - (seiz.first_samp / sfreq) + start
    first = int(onset / dt)
    baseline_ex_power = eeg['baseline']['ave_power'][:, :num_steps]
    seizure_ex_power = eeg['seizure']['ave_power'][:, first:first + num_steps]
    return baseline_ex_power, seizure_ex_power
04c3fed38fa2a2d46ba7edee4bb3f04011d9d2a7
704,708
def build_training_response(mongodb_result, hug_timer, remaining_count):
    """For reducing the duplicate lines in the 'get_single_training_movie'
    function."""
    return {'movie_result': list(mongodb_result)[0],
            'remaining': remaining_count,
            'success': True,
            'valid_key': True,
            'took': float(hug_timer)}
77d541957ff9abaa51bd3eb7fd06b550f41291e2
704,709
def calc_fm_perp_for_fm_loc(k_loc_i, fm_loc):
    """Calculate perpendicular component of fm to scattering vector."""
    k_1, k_2, k_3 = k_loc_i[0], k_loc_i[1], k_loc_i[2]
    mag_1, mag_2, mag_3 = fm_loc[0], fm_loc[1], fm_loc[2]
    mag_p_1 = (k_3*mag_1 - k_1*mag_3)*k_3 - (k_1*mag_2 - k_2*mag_1)*k_2
    mag_p_2 = (k_1*mag_2 - k_2*mag_1)*k_1 - (k_2*mag_3 - k_3*mag_2)*k_3
    mag_p_3 = (k_2*mag_3 - k_3*mag_2)*k_2 - (k_3*mag_1 - k_1*mag_3)*k_1
    return mag_p_1, mag_p_2, mag_p_3
00ba68c74d781748f39d2a577f227316dc523f0f
704,710
import glob
import os


def find_output(directory, extension="out", abspath=True):
    """
    Find output file in a directory.

    Parameters
    ----------
    directory : str
        Path to folder in which output should be located.
    extension : str
        File extension of output file (default: 'out').
    abspath : bool
        Whether to return absolute path (default: True).

    Returns
    -------
    outpath : str
        Path to output file (relative or absolute, default: absolute).
    """
    dir_list = [fn for fn in glob.glob(directory + "/*." + extension)
                if not os.path.basename(fn).startswith("slurm")]
    if len(dir_list) != 1:
        err = f"Could not determine unique .{extension} file in {directory}/ !"
        raise FileNotFoundError(err)
    else:
        outpath = dir_list[0]
        if abspath:
            absdir = os.path.abspath(directory)
            outpath = os.path.join(absdir, dir_list[0])
    return outpath
bb45e97990ea0a6ec98c9212ce9aa1b9effe3040
704,711
def trim_mismatches(gRNA):
    """
    trim off 3' mismatches
    1. first trim to prevent long alignments past end of normal expressed gRNAs
    2. second trim to mismatches close to end of gRNA
    """
    pairing = gRNA['pairing']
    # 1. index of right-most mismatch before index -40
    #    if no MM is found trim will equal 0
    trim = pairing.rfind('.', 0, -40) + 1
    # 2.
    while '.' in pairing[trim:]:
        mm = pairing.find('.', trim)
        # if distance to next mismatch is less than 4 then trim
        # otherwise stop trimming
        if mm - trim < 4:
            trim = mm + 1
        else:
            break
    # trim alignment information
    if trim > 0:
        if gRNA['strand'] == 'coding':
            gRNA['circle_end'] -= trim
        else:
            gRNA['circle_start'] += trim
        gRNA['mRNA_start'] += trim
        gRNA['length'] -= trim
        gRNA['mRNA_seq'] = gRNA['mRNA_seq'][trim:]
        gRNA['gRNA_seq'] = gRNA['gRNA_seq'][trim:]
        gRNA['pairing'] = gRNA['pairing'][trim:]
        gRNA['mismatches'] = gRNA['pairing'].count('.')
    return gRNA
ab0bed78d29d64e9218201a561bb857fd80ed885
704,712
def WTERMSIG(status):
    """Return the signal which caused the process to exit."""
    return 0
d4f45d41de95308c4a16f374e58c16b4384f8fc0
704,713
import io


def create_toplevel_function_string(args_out, args_in, pm_or_pf):
    """
    Create a string for a function of the form:

        def hl_func(x_0, x_1, x_2, ...):
            outputs = (...) = calc_func(...)
            header = [...]
            return DataFrame(data, columns=header)

    Parameters
    ----------
    args_out: iterable of the out arguments
    args_in: iterable of the in arguments
    pm_or_pf: iterable of strings for object that holds each arg

    Returns
    -------
    a String representing the function
    """
    fstr = io.StringIO()
    fstr.write("def hl_func(pm, pf")
    fstr.write("):\n")
    fstr.write("    from pandas import DataFrame\n")
    fstr.write("    import numpy as np\n")
    fstr.write("    import pandas as pd\n")
    fstr.write("    def get_values(x):\n")
    fstr.write("        if isinstance(x, pd.Series):\n")
    fstr.write("            return x.values\n")
    fstr.write("        else:\n")
    fstr.write("            return x\n")
    fstr.write("    outputs = \\\n")
    outs = []
    for ppp, attr in zip(pm_or_pf, args_out + args_in):
        outs.append(ppp + "." + attr + ", ")
    outs = [m_or_f + "." + arg for m_or_f, arg in zip(pm_or_pf, args_out)]
    fstr.write("        (" + ", ".join(outs) + ") = \\\n")
    fstr.write("        " + "applied_f(")
    for ppp, attr in zip(pm_or_pf, args_out + args_in):
        # Bring Policy parameter values down a dimension.
        if ppp == "pm":
            attr += "[0]"
        fstr.write("get_values(" + ppp + "." + attr + ")" + ", ")
    fstr.write(")\n")
    fstr.write("    header = [")
    col_headers = ["'" + out + "'" for out in args_out]
    fstr.write(", ".join(col_headers))
    fstr.write("]\n")
    if len(args_out) == 1:
        fstr.write("    return DataFrame(data=outputs,"
                   "columns=header)")
    else:
        fstr.write("    return DataFrame(data=np.column_stack("
                   "outputs),columns=header)")
    return fstr.getvalue()
44d78a9d0a3146b4008868073e5828422b819cc8
704,714
def is_live_request(request):
    """
    Helper to differentiate between live requests and scripts.

    Requires :func:`~.request_is_live_tween_factory`.
    """
    return request.environ.get("LIVE_REQUEST", False)
1e5e64901715131f363d6d343acbd4d631cf6b6f
704,715
# collections, not typing: typing.OrderedDict is only meant for annotations
from collections import OrderedDict


def pyvcf_calls_to_sample_info_list(calls):
    """
    Given pyvcf.model._Call instances, return a dict mapping each sample
    name to its per-sample info:
        sample name -> field -> value
    """
    return OrderedDict(
        (call.sample, call.data._asdict()) for call in calls)
937a748b3a0ff26a28ff4a4db5e1505dbb927ff9
704,716
from typing import Optional


def injection_file_name(
    science_case: str, num_injs_per_redshift_bin: int, task_id: Optional[int] = None
) -> str:
    """Returns the file name for the raw injection data without path.

    Args:
        science_case: Science case.
        num_injs_per_redshift_bin: Number of injections per redshift major bin.
        task_id: Task ID.
    """
    file_name = (
        f"injections_SCI-CASE_{science_case}"
        f"_INJS-PER-ZBIN_{num_injs_per_redshift_bin}.npy"
    )
    if task_id is not None:
        file_name = file_name.replace(".npy", f"_TASK_{task_id}.npy")
    return file_name
57b034b6a60c317f0c071c1313d0d99f2802db30
704,717
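Example file names produced by injection_file_name ("BNS" is just an illustrative science-case label):

print(injection_file_name("BNS", 250))
# injections_SCI-CASE_BNS_INJS-PER-ZBIN_250.npy
print(injection_file_name("BNS", 250, task_id=3))
# injections_SCI-CASE_BNS_INJS-PER-ZBIN_250_TASK_3.npy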
def query_add(session, *objs):
    """Add `objs` to `session`."""
    for obj in objs:
        session.add(obj)
    session.commit()
    return objs
95ffa9e0f5a4a9255f8b0b063c5bd092f0f66039
704,719
def sd_title(bs4_object, target=None):
    """
    :param bs4_object: An object of class BeautifulSoup
    :param target: Target HTML tag attributes, a dict. Defaults to
        {"class": "title-text"}.
    :return: Returns the paper title from Science Direct
    """
    if target is None:
        target = {"class": "title-text"}
    return bs4_object.find_all("span", target)[0].text
8429fe680fafb86c773a0cd2b3280e893b95fc9a
704,720
def split_formula(formula, net_names_list):
    """
    Splits the formula into two parts - the structured and unstructured part.

    Parameters
    ----------
    formula : string
        The formula to be split, e.g. '~ 1 + bs(x1, df=9) + dm1(x2, df=9)'.
    net_names_list : list of strings
        A list of all network names defined by the user.

    Returns
    -------
    structured_part : string
        A string holding only the structured part of the original formula.
    unstructured_terms: list of strings
        A list holding all the unstructured parts of the original formula.
    """
    structured_terms = []
    unstructured_terms = []
    # remove spaces and the tilde, and split into formula terms
    formula = formula.replace(' ', '')
    formula = formula.replace('~', '')
    formula_parts = formula.split('+')
    # for each formula term
    for part in formula_parts:
        term = part.split('(')[0]
        # if it is an unstructured part
        if term in net_names_list:
            # append it to a list
            unstructured_terms.append(part)
        else:
            structured_terms.append(part)
    # join the structured terms together again
    structured_term = '+'.join(structured_terms)
    return structured_term, unstructured_terms
1fce8617cbdaf767c1aebb6d0d685ca63975c820
704,721
from sys import intern


def joinHostmask(nick, ident, host):
    """nick, user, host => hostmask
    Joins the nick, ident, host into a user hostmask."""
    assert nick and ident and host
    return intern('%s!%s@%s' % (nick, ident, host))
e039f6afe37638a24f07a924bd537e6f6b6eb415
704,722
from itertools import combinations
from typing import List
from functools import reduce
from operator import mul


def max_triple_product_bare_bones(nums: List[int]) -> int:
    """
    A bare-bones O(n^3) method to determine the largest product of three
    numbers in a list
    :param nums: the list of numbers
    :return: the highest product
    """
    return max([reduce(mul, lst, 1) for lst in combinations(nums, 3)])
8053bc6e35120f6ee8eca2b24e81cbaa7713dfb3
704,723
import requests


def analyze_comments_page(username, repo, per_page, page,
                          print_comments, print_stage_results):
    """
    Analyzes one page of GitHub comments. Helping function.

    Parameters
    ----------
    username : str
        The GitHub alias of the repository owner
    repo : str
        The GitHub repository name
    per_page : int
        The number of comments on the page (from 0 to 100)
    page : int
        The page number of the results to fetch
    print_comments : bool
        If True, each fetched comment and its analysis will be printed
    print_stage_results : bool
        If True, final statistics of the analyzed comments will be printed
        in the end

    Returns
    -------
    total : int
        The number of comments fetched (if the number of comments on the
        page is less than the per_page parameter, all the available comments
        will be processed and their number will be returned. Else, equal to
        per_page)
    pos : int
        The number of positive comments fetched
    neg : int
        The number of negative comments fetched
    neut : int
        The number of neutral comments fetched
    """
    total = 0
    pos = 0
    neg = 0
    neut = 0
    print("Processing page #" + str(page) + "...\n")
    query = {'per_page': per_page, 'page': page}
    resp = requests.get("https://api.github.com/repos/" +
                        username + "/" + repo + "/issues/comments",
                        params=query)
    comments = resp.json()
    for comment in comments:
        total = total + 1
        if print_comments:
            print(str(total) + '. ' + comment.get("body"))
        query = {'text': comment.get("body")}
        response = requests.post(
            "http://text-processing.com/api/sentiment/", data=query)
        if print_comments:
            print(response.json())
            print('\n')
        sentiment = response.json().get("label")
        if sentiment == 'pos':
            pos = pos + 1
        elif sentiment == 'neg':
            neg = neg + 1
        else:
            neut = neut + 1
    if print_stage_results:
        print('Processed: ' + str(total))
        print('Positive comments: ' + str(pos))
        print('Negative comments: ' + str(neg))
        print('Neutral comments: ' + str(neut))
    return total, pos, neg, neut
e3d153a0319db0bc723df65cb8a92533f9b37b82
704,725
def get_remotes(y, x):
    """
    For a given pair of ``y`` (tech) and ``x`` (location), return
    ``(y_remote, x_remote)``, a tuple giving the corresponding indices of
    the remote location a transmission technology is connected to.

    Example: for ``(y, x) = ('hvdc:region_2', 'region_1')``, returns
    ``('hvdc:region_1', 'region_2')``
    """
    y_remote = y.split(':')[0] + ':' + x
    x_remote = y.split(':')[1]
    return (y_remote, x_remote)
3c479d818947362349982c77a9bbd87a97a3d4d5
704,726
from typing import List


def ingrid(x: float, y: float, subgrid: List[int]) -> bool:
    """Check if position (x, y) is in a subgrid"""
    i0, i1, j0, j1 = subgrid
    return (i0 <= x) & (x <= i1 - 1) & (j0 <= y) & (y <= j1 - 1)
d296d8a7abe5eeb3da8d57691755a2bd19dd15b6
704,727
import json
from pathlib import Path
from typing import Any, Union


def load_jsonl(path: Union[Path, str]) -> list[dict[str, Any]]:
    """
    Load from jsonl.

    Args:
        path: path to the jsonl file
    """
    path = Path(path)
    return [json.loads(line) for line in path.read_text().splitlines()]
a59d2920bfa491b1d4daa693b5e2e1b4846d6fc6
704,728
import subprocess
import re


def git_version():
    """
    Get the full and python standardized version from Git tags (if possible)
    """
    try:
        # Full version includes the Git commit hash
        full_version = subprocess.check_output(
            'git describe --dirty', shell=True).decode("utf-8").strip(" \n")
        # Python standardized version in form major.minor.patch.post<build>
        version_regex = re.compile(r"v?(\d+\.\d+(\.\d+)?(-\d+)?).*")
        match = version_regex.match(full_version)
        if match:
            std_version = match.group(1).replace("-", ".post")
        else:
            raise RuntimeError(
                "Failed to parse version string %s" % full_version)
        return full_version, std_version
    except Exception:
        # Any failure, return None. We may not be in a Git repo at all
        return None, None
12f90dc2dc6cd620acff215aa2b0ad20079a2484
704,729
def wrap(get_io_helper_func):
    """A decorator that takes one argument. The argument should be an
    instance of the helper class returned by new_helper(). This decorator
    wraps a method so that it may perform asynchronous IO using the helper
    instance. The method being wrapped should take a keyword argument
    'io_helper' which will be set to the helper instance passed in."""
    def decorator_factory(func):
        def func_wrapper(*args, **kwargs):
            if "io_helper" not in kwargs:
                kwargs["io_helper"] = get_io_helper_func()
            helper = kwargs["io_helper"]
            helper._generator = func(*args, **kwargs)
            helper._next()
        func_wrapper.__doc__ = func.__doc__
        func_wrapper.__name__ = func.__name__
        return func_wrapper
    return decorator_factory
f2cdd8009d1722a81d848ab05c3cb6f3acaf5e50
704,730
def internal_superset_url():
    """The URL under which the Superset instance can be reached from mara
    (usually circumventing SSOs etc.)"""
    return 'http://localhost:8088'
8a66c1d2c0587e9e6a563d08506606d389c2e6be
704,731
import tempfile


def temp():
    """
    Create a temporary file

    Returns
    -------
    str
        Path of temporary file
    """
    # note: the open file descriptor `handle` is not closed here
    handle, name = tempfile.mkstemp()
    return name
5955f3ceabd30ba5bb487677d9382253e1fde50a
704,732
def ignoreNamePath(path):
    """
    For shutil.copytree func
    :param path: list of name suffixes to ignore
    :return: ignore callable usable as copytree's `ignore` argument
    """
    path += ['.idea', '.git', '.pyc']

    def ignoref(directory, contents):
        ig = [f for f in contents
              if (any([f.endswith(elem) for elem in path]))]
        return ig
    return ignoref
9d51d53c8dae8fb2322c3f90ea0f451731395816
704,733
def row2string(row, sep=', '):
    """Converts a one-dimensional numpy.ndarray, list or tuple to string

    Args:
        row: one-dimensional list, tuple, numpy.ndarray or similar
        sep: string separator between elements

    Returns:
        string representation of a row
    """
    return sep.join("{0}".format(item) for item in row)
f81a2ec54b8c37285715cadca4458918962440b9
704,734
def build_aggregation(facet_name, facet_options, min_doc_count=0):
    """Specify an elasticsearch aggregation from schema facet configuration."""
    exclude = []
    if facet_name == 'type':
        field = 'embedded.@type'
        exclude = ['Item']
    elif facet_name.startswith('audit'):
        field = facet_name
    else:
        field = 'embedded.' + facet_name
    agg_name = facet_name.replace('.', '-')
    facet_type = facet_options.get('type', 'terms')
    facet_length = 200
    if facet_options.get('length') == 'long':
        facet_length = 3000
    if facet_type in ['terms', 'typeahead']:
        agg = {
            'terms': {
                'field': field,
                'min_doc_count': min_doc_count,
                'size': facet_length,
            },
        }
        if exclude:
            agg['terms']['exclude'] = exclude
    elif facet_type == 'exists':
        agg = {
            'filters': {
                'filters': {
                    'yes': {
                        'bool': {
                            'must': {
                                'exists': {'field': field}
                            }
                        }
                    },
                    'no': {
                        'bool': {
                            'must_not': {
                                'exists': {'field': field}
                            }
                        }
                    },
                },
            },
        }
    else:
        raise ValueError('Unrecognized facet type {} for {} facet'.format(
            facet_type, field))
    return agg_name, agg
b8c3f337143a229401b9a41a8fde8903027cf67e
704,735
def spawn(pool):
    """spawn a greenlet; it will be automatically killed after the test run"""
    return pool.spawn
fadea4b814e77f7fb26af27f0cc7bce1189a7dcf
704,736
def inline(text):
    """
    Convert all newline characters to HTML entities: &#10;
    This can be used to prevent Hypertag from indenting lines of `text` when
    rendering parent nodes, and to safely insert `text` inside <pre>,
    <textarea>, or similar elements.
    """
    return text.replace('\n', '&#10;')
658f7e5adbf5747ea069fad8a9599e9bd499a381
704,737
def get_bq_col_type(col_type):
    """
    Return correct SQL column type representation.

    :param col_type: The type of column as defined in json schema files.
    :return: A SQL column type compatible with BigQuery
    """
    lower_col_type = col_type.lower()
    if lower_col_type == 'integer':
        return 'INT64'
    if lower_col_type == 'string':
        return 'STRING'
    if lower_col_type == 'float':
        return 'FLOAT64'
    if lower_col_type == 'numeric':
        return 'DECIMAL'
    if lower_col_type == 'time':
        return 'TIME'
    if lower_col_type == 'timestamp':
        return 'TIMESTAMP'
    if lower_col_type == 'date':
        return 'DATE'
    if lower_col_type == 'datetime':
        return 'DATETIME'
    if lower_col_type == 'bool':
        return 'BOOL'
    return 'UNSET'
86cac08a04d804cc6addbeee86014f1aa6d35735
704,738
def col(loc, strg):
    """
    Returns current column within a string, counting newlines as line
    separators. The first column is number 1.

    Note: the default parsing behavior is to expand tabs in the input string
    before starting the parsing process. See
    :class:`ParserElement.parseString` for more information on parsing
    strings containing ``<TAB>`` s, and suggested methods to maintain a
    consistent view of the parsed string, the parse location, and line and
    column positions within the parsed string.
    """
    s = strg
    return 1 if 0 < loc < len(s) and s[loc - 1] == "\n" else loc - s.rfind("\n", 0, loc)
0dfc4387e391c4823939350ad19c60d106211a58
704,739
import unicodedata


def remove_accents(string):
    """
    Removes unicode accents from a string, downgrading to the base character
    """
    nfkd = unicodedata.normalize('NFKD', string)
    return u"".join([c for c in nfkd if not unicodedata.combining(c)])
41c8e05aa8982c85cf5cf2135276cdb5e26fefec
704,740
def parse_range(rng, dictvars={}):
    """Parse a string with an integer range and return a list of numbers,
    replacing special variables in dictvars."""
    parts = rng.split('-')
    if len(parts) not in [1, 2]:
        raise ValueError("Bad range: '%s'" % (rng,))
    parts = [int(i) if i not in dictvars else dictvars[i] for i in parts]
    start = parts[0]
    end = start if len(parts) == 1 else parts[1]
    if start > end:
        end, start = start, end
    return range(start, end + 1)
214109a71c84d06241e29cacaa052d9ce00302c5
704,741
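Usage sketches for parse_range, including the variable-substitution path:

print(list(parse_range('3-6')))            # [3, 4, 5, 6]
print(list(parse_range('5')))              # [5]
print(list(parse_range('1-N', {'N': 3})))  # [1, 2, 3]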
def is_odd(num: int) -> bool:
    """Is num odd?

    :param num: number to check.
    :type num: int
    :returns: True if num is odd.
    :rtype: bool
    :raises: ``TypeError`` if num is not an int.
    """
    if not isinstance(num, int):
        raise TypeError("{} is not an int".format(num))
    return num % 2 == 1
0e5781596a99909e58583859948332c3afb06fb0
704,742
import runpy
import imp


def mod_from_file(mod_name, path):
    """Runs the Python code at path, returns a new module with the
    resulting globals"""
    attrs = runpy.run_path(path, run_name=mod_name)
    mod = imp.new_module(mod_name)
    mod.__dict__.update(attrs)
    return mod
3ea8109d912582555b76816f55fbb632ba82f189
704,743
def interpolation(x0: float, y0: float, x1: float, y1: float, x: float) -> float:
    """
    Performs interpolation.

    Parameters
    ----------
    x0 : float.
        The coordinate of the first point on the x axis.
    y0 : float.
        The coordinate of the first point on the y axis.
    x1 : float.
        The coordinate of the second point on the x axis.
    y1 : float.
        The coordinate of the second point on the y axis.
    x : float.
        A value in the interval (x0, x1).

    Returns
    -------
    float.
        Is the interpolated or extrapolated value.

    Example
    -------
    >>> from pymove.utils.math import interpolation
    >>> x0, y0, x1, y1, x = 2, 4, 3, 6, 3.5
    >>> print(interpolation(x0,y0,x1,y1,x), type(interpolation(x0,y0,x1,y1,x)))
    7.0 <class 'float'>
    """
    return y0 + (y1 - y0) * ((x - x0) / (x1 - x0))
f8fc96c6dc6c2eeeeceb22f92b32023f3873fe3e
704,744
import collections


def product_counter_v3(products):
    """Get count of products in descending order."""
    return collections.Counter(products)
22c57d50dc36d3235e6b8b642a4add95c9266687
704,745
def rossler(x, y, z, a, b, c):
    """Rössler System of Ordinary Differential Equations"""
    dx = -y - z
    dy = x + a*y
    dz = b + z*(x - c)
    return dx, dy, dz
bcf27c7ff8223681d6dc7d0c49497e975b826d80
704,747
import re


def get_extension(filename):
    """
    Extract file extension from filename using regex.

    Args:
        filename (str): name of file

    Returns:
        str: the file extension
    """
    match = re.search(r"\.(?P<ext>[^.]+)$", filename)
    if match:
        return match.group("ext")
    raise ValueError(f"No extension could be extracted from '{filename}'")
8f5195b339a153d5fa144182505dba986992d4df
704,748
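For example, only the final suffix is returned for multi-dot names:

print(get_extension('archive.tar.gz'))  # gz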
def scale_val(val, factor, direction):
    """Scale val by factor either 'up' or 'down'."""
    if direction == 'up':
        return val + (val * factor)
    if direction == 'down':
        return val - (val * factor)
    raise ValueError('direction must be "up" or "down"')
16c2efe16fc787fe4461fb0ae640e2cf22d556e0
704,749
def addattrs(field, css):
    """
    Add arbitrary attrs to a form field in a template, particularly inputs
    """
    attrs = {}
    definition = css.split(',')
    for d in definition:
        if '=' not in d:
            attrs['class'] = d
        else:
            t, v = d.split('=')
            attrs[t] = v
    return field.as_widget(attrs=attrs)
cdbb2b4b44b6e7facbe2af44d503c3118eb31ef7
704,750
def adjust_update_rules_for_fixed_nodes(predecessor_node_lists, truth_tables,
                                        fixed_nodes):
    """
    Adjust "update rules" matrix and its free element vector so that the
    fixed nodes will end up in their fixed states on each time step
    automatically, with no manual interventions required.

    :param predecessor_node_lists: list of predecessor node lists
    :param truth_tables: list of dicts (key: tuple of predecessor node
        states, value: resulting node state)
    :param fixed_nodes: dict with fixed nodes (key: node, value: node state)
    :return: (predecessor node lists and truth tables, adjusted with respect
        to fixed nodes)
    """
    adjusted_predecessor_node_lists = \
        [predecessor_nodes.copy() for predecessor_nodes in predecessor_node_lists]
    adjusted_truth_tables = [truth_table.copy() for truth_table in truth_tables]
    for node, node_state in fixed_nodes.items():
        adjusted_predecessor_node_lists[node] = []
        adjusted_truth_tables[node] = {(): node_state}
    return adjusted_predecessor_node_lists, adjusted_truth_tables
f41609ae25c3622100674372de5a364b095650f8
704,751
def parse_list_from_string(value):
    """
    Handle array fields by converting them to a list.

    Example: '1,2,3' -> ['1', '2', '3']
    """
    return [x.strip() for x in value.split(",")]
51e9c654b9d18b8be61c37aab5f5029dfdea2213
704,753
import itertools


def merge(d1, d2):
    """Merge two dicts into one.

    Args:
        d1 (dict): dataset 1
        d2 (dict): dataset 2

    Returns:
        dict: merged dict
    """
    return dict(itertools.chain(list(d1.items()), list(d2.items())))
bb1d38f3cb45de6e98855fb04ae1d3d7e73e4a40
704,755
import re


def is_valid(number):
    """
    Check if number is a valid Roman numeral

    :param number: string to check
    :type number: str
    :return: True or False
    :rtype: bool
    """
    # bool() so the declared return type holds (re.match returns Match/None)
    return bool(re.match(
        r"^(M{0,3})(D?C{0,3}|C[DM])(L?X{0,3}|X[LC])(V?I{0,3}|I[VX])$",
        number
    ))
52e1937418d28701ee3d30da139f16ae64cfe480
704,756
def has_open_quotes(s):
    """Return whether a string has open quotes.

    This simply counts whether the number of quote characters of either
    type in the string is odd.

    Returns
    -------
    If there is an open quote, the quote character is returned.
    Else, return False.
    """
    # We check " first, then ', so complex cases with nested quotes will get
    # the " to take precedence.
    if s.count('"') % 2:
        return '"'
    elif s.count("'") % 2:
        return "'"
    else:
        return False
a9adbcd42518a71458c69c9aa1ff751fa3998573
704,758
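A quick check of has_open_quotes on balanced and unbalanced input:

print(has_open_quotes('say "hi'))     # "
print(has_open_quotes("it's odd"))    # '
print(has_open_quotes('all "done"'))  # False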
def _qt(add_row, secondary_dict_ptr, cols, key):
    """
    This sub-function is called by view_utils.qt to add keys to the
    secondary_dict and is NOT meant to be called directly.
    """
    if cols[key]:
        if cols[key] in secondary_dict_ptr:
            return add_row, secondary_dict_ptr[cols[key]]
        else:
            secondary_dict_ptr[cols[key]] = {}
            return True, secondary_dict_ptr[cols[key]]
    else:
        return add_row, secondary_dict_ptr
ce1cec842822077cbfbd908ff92b1552626cd5f2
704,759
def row_contains_data(fieldnames, row):
    """Returns True if the value of at least one of the fields is truthy"""
    for field in fieldnames:
        if row.get(field):
            return True
    return False
7575d1280186c582a652ab37deb4a93e667b51b2
704,761
import torch


def spherical_schwarzchild_metric(x, M=1):
    """Computes the Schwarzschild metric in spherical coordinates"""
    bs, d = x.shape
    t, r, theta, phi = x.T
    rs = 2 * M
    a = (1 - rs / r)
    gdiag = torch.stack([-a, 1 / a, r**2, r**2 * theta.sin()**2], dim=-1)
    g = torch.diag_embed(gdiag)
    print(g.shape)  # debug output: shape of the metric tensor batch
    return g
4e65d520a88f4b9212bab43c7ebc4dfc30245bd3
704,763
def get_picard_mrkdup(config):
    """
    input: sample config file output from BALSAMIC
    output: mrkdup or rmdup strings
    """
    picard_str = "mrkdup"
    if "picard_rmdup" in config["QC"]:
        if config["QC"]["picard_rmdup"] == True:
            picard_str = "rmdup"
    return picard_str
87e24c0bf43f9ac854a1588b80731ed445b6dfa5
704,764
import random


def ChoiceColor():
    """Randomly choose one of the built-in bootstrap colors in a template"""
    color = ["default", "primary", "success", "info", "warning", "danger"]
    return random.choice(color)
15779e8039c6b389301edef3e6d954dbe2283d54
704,765
import hashlib


def __get_str_md5(string):
    """
    Return the MD5 value of a string
    """
    m0 = hashlib.md5()
    m0.update(string.encode('utf-8'))
    result = m0.hexdigest()
    return result
1d55cd42dc16a4bf674907c9fb352f3b2a100d6c
704,766
def dc_coordinates():
    """Return coordinates for a DC-wide map"""
    dc_longitude = -77.016243706276569
    dc_latitude = 38.894858329321485
    dc_zoom_level = 10.3
    return dc_longitude, dc_latitude, dc_zoom_level
c07812ad0a486f549c63b81787a9d312d3276c32
704,767
import argparse


def get_arguments():
    """
    Obtains command-line arguments.

    :rtype: argparse.Namespace
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--clusters', type=argparse.FileType('rU'), required=True,
        metavar='CLUSTERS',
        help='read cluster assignments from CSV file %(metavar)s')
    parser.add_argument(
        '--diagnoses', type=argparse.FileType('rU'), required=True,
        metavar='DIAGNOSES',
        help='read diagnoses from CSV file %(metavar)s')
    parser.add_argument(
        '--scores', type=argparse.FileType('rU'), required=True,
        metavar='SCORES',
        help='read scores from CSV file %(metavar)s')
    parser.add_argument(
        '--base-dir', required=True, metavar='BASE-DIR',
        help='output all files under directory %(metavar)s')
    parser.add_argument(
        '--diagnosis-order', type=argparse.FileType('rU'),
        metavar='DIAGNOSIS-ORDER',
        help='read the order of diagnoses from text file %(metavar)s')
    parser.add_argument(
        '--diagnosis-map', type=argparse.FileType('rU'),
        metavar='DIAGNOSIS-MAP',
        help='load diagnosis mappings from %(metavar)s')
    parser.add_argument(
        '--intraclassification-spacing', type=float, default=10.,
        metavar='INTRACLASSIFICATION-SPACING',
        help=('set the intraclassification spacing to %(metavar)s (default '
              '%(default)s)'))
    parser.add_argument(
        '--interclassification-spacing', type=float, default=50.,
        metavar='INTERCLASSIFICATION-SPACING',
        help=('set the interclassification spacing to %(metavar)s (default '
              '%(default)s)'))
    parser.add_argument(
        '--log', metavar='LOG',
        help='output logging information to %(metavar)s')
    return parser.parse_args()
32ce1446d8ac04208a4387bcd2ac31a2609580a3
704,768
from typing import Callable, Iterable, List


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))
51b09a3491769aafba653d4198fde94ee733d68f
704,769
def multiply_something(num1, num2):
    """this function will multiply num1 and num2

    >>> multiply_something(2, 6)
    12
    >>> multiply_something(-2, 6)
    -12
    """
    return num1 * num2
1a726c04df146ab1fa7bfb13ff3b353400f2c4a8
704,771
def estimate_infectious_rate_constant_vec(event_times, follower, t_start,
                                          t_end, kernel_integral,
                                          count_events=None):
    """
    Returns estimation of infectious rate for given event times and
    followers on a defined interval. Optimized using numpy.

    :param event_times: nd-array of event times
    :param follower: nd-array of follower counts
    :param t_start: time interval start
    :param t_end: time interval end
    :param kernel_integral: integral function of kernel function
    :param count_events: count of observed events in interval (used for
        time window approach)
    :return: estimated values for infectious rate
    """
    kernel_int = follower * kernel_integral(t_start - event_times,
                                            t_end - event_times)
    if count_events is not None:
        return count_events / kernel_int.sum()
    else:
        return event_times.size / kernel_int.sum()
207833e1b32885fe39a209bfef227665c8c59ad1
704,772
def find(word, letter):
    """find letter in word, return index of first occurrence (or -1)"""
    index = 0
    while index < len(word):
        if word[index] == letter:
            return index
        index = index + 1
    return -1
bdeb0f0993fb4f7904b4e9f5244ea9d7817fa15f
704,773
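For example, searching within a word:

print(find('banana', 'n'))  # 2
print(find('banana', 'z'))  # -1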
import re


def file_read(lines):
    """
    Function for the file reading process.

    Strips the file to get ONLY the text; no timestamps or sentence indexes
    are included, so the returned list holds only the caption text.
    """
    text_list = []
    for line in lines:
        if (re.search('^[0-9]', line) is None
                and re.search('^[0-9]{2}:[0-9]{2}:[0-9]{2}', line) is None
                and re.search('^$', line) is None):
            text_list.append(line.rstrip('\n'))
    return text_list
7d37bb79c6b1cdd43d7b813e03bf3d8b18f5a6ed
704,774
def has_file_ext(view, ext):
    """Returns ``True`` if view has file extension ``ext``.
    ``ext`` may be specified with or without leading ``.``.
    """
    if not view.file_name() or not ext.strip().replace('.', ''):
        return False
    if not ext.startswith('.'):
        ext = '.' + ext
    return view.file_name().endswith(ext)
043edf03874d1ec20e08fcb5795fd205206f7194
704,775
def get_genes(exp_file, samples, threshold, max_only):
    """
    Reads in and parses the .bed expression file.

    The expected file format is tab separated columns with a header line:
        CHR START STOP GENE <sample 1> <sample 2> ... <sample n>

    Args:
        exp_file (str): Name of expression file.
        samples (list): Names of the samples in the vcf file.
        threshold (float): Expression threshold to filter lowly/unexpressed
            genes.
        max_only (bool): if true, gene_dict value is 1 value = max expression
            if false gene_dict value is list of expression values
            YYY: WARNING: if want list to have meaning then values needs to
            be tied to header sample names

    Returns:
        gene_dict (dict): {gene_name: [expression_vals]}. Only include
            values for samples in the vcf.
    """
    data_cols = []
    gene_dict = {}
    print('start read exp_file:' + format(exp_file))
    if max_only:
        # read and only return max exp value in gene_dict
        with open(exp_file) as f:
            header = f.readline().strip().split('\t')
            for samp in header[4:]:
                if samp in samples:
                    data_idx = header.index(samp)
                    data_cols.append(data_idx)
            # Read in expression levels for each gene.
            for line in f:
                line = line.strip().split('\t')
                gene_name = line[3].upper()
                exp_val = -1e1000
                for idx in data_cols:
                    if float(line[idx]) > exp_val:
                        exp_val = float(line[idx])
                gene_dict[gene_name] = exp_val
    else:
        # read and return exp value list in gene_dict
        with open(exp_file) as f:
            header = f.readline().strip().split('\t')
            for samp in header[4:]:
                if samp in samples:
                    data_idx = header.index(samp)
                    data_cols.append(data_idx)
            # Read in expression levels for each gene.
            for line in f:
                line = line.strip().split('\t')
                gene_name = line[3].upper()
                exp_vals = []
                for idx in data_cols:
                    exp_vals.append(line[idx])
                gene_dict[gene_name] = exp_vals
    return gene_dict
62b27eef9c863078c98dee0d09bada5e058909e2
704,776
def conv_name_to_c(name):
    """Convert a device-tree name to a C identifier

    This uses multiple replace() calls instead of re.sub() since it is
    faster (400ms for 1m calls versus 1000ms for the 're' version).

    Args:
        name: Name to convert
    Return:
        String containing the C version of this name
    """
    new = name.replace('@', '_at_')
    new = new.replace('-', '_')
    new = new.replace(',', '_')
    new = new.replace('.', '_')
    return new
150af670d8befea7374bbb5b13da9d6e0734863e
704,777
def get_account_id(role_arn):
    """
    Returns the account ID for a given role ARN.
    """
    # The format of an IAM role ARN is
    #
    #     arn:partition:service:region:account:resource
    #
    # Where:
    #
    # - 'arn' is a literal string
    # - 'service' is always 'iam' for IAM resources
    # - 'region' is always blank for IAM resources
    # - 'account' is the AWS account ID with no hyphens
    #
    # See https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns
    try:
        arn, _, service, region, account, _ = role_arn.split(":")
    except ValueError:
        raise ValueError(f"Is this a valid AWS ARN? {role_arn}")
    if arn != "arn":
        raise ValueError(f"Is this a valid AWS ARN? {role_arn}")
    if service != "iam" or region != "" or not account.isnumeric():
        raise ValueError(f"Is this an IAM role ARN? {role_arn}")
    return account
623eb66eefd59b9416deb478c527062ae4454df7
704,778
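An illustrative call with a syntactically valid, made-up role ARN (123456789012 is the placeholder account ID used in AWS docs):

print(get_account_id("arn:aws:iam::123456789012:role/demo"))  # 123456789012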
from typing import Any


def list_to_dict(data: list, value: Any = {}) -> dict:
    """Convert list to a dictionary.

    Parameters
    ----------
    data: list
        Data type to convert
    value: typing.Any
        Default value for the dict keys

    Returns
    -------
    dictionary : dict
        Dictionary of the input data
    """
    # note: `value` is a single shared object, so with the mutable default
    # every key maps to the same dict instance
    return {item: value for item in data}
1e73bb6ca98b5e2d9b1e0f8d4cb19fc044a9ce63
704,780
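A usage note for list_to_dict: because the default value is one shared mutable dict, every key ends up pointing at the same object, as illustrated below.

d = list_to_dict(['a', 'b'])
d['a']['x'] = 1
print(d)  # {'a': {'x': 1}, 'b': {'x': 1}}: both keys share the same dict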