content: string, lengths 35–416k
sha1: string, length 40
id: int64, 0–710k
def get_config_by_url(cfg, url): """ :param cfg: dictionary :param url: path to value separated by dot, e.g, key1.key2.key3 :return: value from dictionary """ keys = url.split('.') for key in keys: cfg = cfg[key] return cfg
0dd2a01a2ecd198f044eebcd3f854f96dbf945bd
702,083
from typing import Any def not_a_seq_complex(request: Any) -> Any: """Provide random values that are not sequences of complex numbers.""" return request.param[1]
ad28133edced67801deab511100b19606dfa2cd3
702,084
def cut_transcript_seq(seq: str, tag: str): """ Some of the sequences contain length % 3 != 0, because they have ambiguous start and/or end. If this is the case, they should be cut until length % 3 == 0 There are sequences which have both ambiguous start and end => no solution yet :param seq: dna sequences of the current protein :param tag: tags, which contain information about ambiguous start and/or end :return: correct dna sequence with length % 3 == 0 if ambiguous start and end or no tags provided, but the sequence has length % 3 != 0 seq = 'NNN' """ if "cds_end_NF" in tag and "cds_start_NF" not in tag: while(len(seq) % 3 != 0): seq = seq[:-1] if seq[-3:] in ["TAA", "TAG", "TGA"]: seq = seq[:-3] elif "cds_end_NF" not in tag and "cds_start_NF" in tag and len(seq) % 3 != 0: while(len(seq) % 3 != 0): seq = seq[1:] seq = "XXX"+seq elif "cds_end_NF" in tag and "cds_start_NF" in tag: print("Ambiguous start and end! Skip seq!") seq = "NNN" # NNN will be translated as empty string elif "cds_end_NF" not in tag and "cds_start_NF" not in tag and len(seq) % 3 != 0: print("No tags for ambiguous start and end, but len % 3 != 0. Skip seq!") seq = "NNN" # NNN will be translated as empty string return seq
3fdc6512f1edc47157778b828b944fdf940fa37a
702,085
def kama(df, price, kama, n, fast_ema=2, slow_ema=30): """ Kaufman's Adaptive Moving Average (KAMA) is a moving average designed to account for market noise or volatility. KAMA will closely follow prices when the price swings are relatively small and the noise is low. KAMA will adjust when the price swings widen and follow prices from a greater distance. Parameters: df (pd.DataFrame): DataFrame which contain the asset information. price (string): the column name for the series type of the asset. kama (string): the column name for the kama results. n (int): the total number of periods. fast_ema (int): the time period of the fast exponential moving average. slow_ema (int): the time period of the slow exponential moving average. Returns: df (pd.DataFrame): Dataframe with kama of the asset calculated. """ er = ( df[price].diff(n).abs() / df[price].diff().abs().rolling(window=n).sum() ) fast_sc = 2 / (fast_ema + 1) slow_sc = 2 / (slow_ema + 1) df[kama + "_sc"] = ((er * (fast_sc - slow_sc)) + slow_sc) ** 2 prev_kama = list(df[:n][price].rolling(window=n).mean())[-1] df.loc[n - 1, kama] = prev_kama df.loc[n:, kama] = 0.0 kamas = [0.0 for i in range(n)] for row in df.loc[n:, [price, kama + "_sc"]].itertuples(index=False): kamas.append(prev_kama + row[1] * (row[0] - prev_kama)) prev_kama = kamas[-1] df[kama] += kamas df = df.dropna().reset_index(drop=True) df.drop([kama + "_sc"], axis=1, inplace=True) return df
58969929ad098c92346e21910002f1d5c9428ed6
702,086
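A minimal usage sketch for the kama helper above — illustrative only: the pandas/numpy availability, the synthetic price data, and the column names "close" / "kama_10" are assumptions, not part of the original snippet.

    import numpy as np
    import pandas as pd

    # Hypothetical price series; "close" and "kama_10" are arbitrary column names.
    rng = np.random.default_rng(0)
    prices = pd.DataFrame({"close": np.linspace(100.0, 110.0, 30) + rng.normal(0, 0.5, 30)})
    result = kama(prices, price="close", kama="kama_10", n=10)  # kama() as defined above
    print(result[["close", "kama_10"]].tail())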
import os def find_deepest_user_frame(tb): """ Find the deepest stack frame that is not part of SCons. Input is a "pre-processed" stack trace in the form returned by traceback.extract_tb() or traceback.extract_stack() """ tb.reverse() # find the deepest traceback frame that is not part # of SCons: for frame in tb: filename = frame[0] if filename.find(os.sep+'SCons'+os.sep) == -1: return frame return tb[0]
11753ebbb734058fe4e6dd42d43ba95cbe07c88e
702,088
def task_run_black(): """ calls black code formatter """ task = { 'actions': ['black textwalker'], 'verbosity': 2 } return task
794fc232bb9096c39885d8ffe25c14b8105ac6b6
702,090
def imq_kernel(x, y, score_x, score_y, g=1, beta=0.5, return_kernel=False): """Compute the IMQ Stein kernel between x and y Parameters ---------- x : torch.tensor, shape (n, p) Input particles y : torch.tensor, shape (n, p) Input particles score_x : torch.tensor, shape (n, p) The score of x score_y : torch.tensor, shape (n, p) The score of y g : float Bandwidth beta : float Power of the kernel return_kernel : bool whether the original kernel k(xi, yj) should also be returned Return ------ stein_kernel : torch.tensor, shape (n, n) The linear Stein kernel kernel : torch.tensor, shape (n, n) The base kernel, only returned id return_kernel is True """ _, p = x.shape d = x[:, None, :] - y[None, :, :] dists = (d ** 2).sum(axis=-1) res = 1 + g * dists kxy = res ** (-beta) scores_d = score_x[:, None, :] - score_y[None, :, :] temp = d * scores_d dkxy = 2 * beta * g * (res) ** (-beta - 1) * temp.sum(axis=-1) d2kxy = 2 * ( beta * g * (res) ** (-beta - 1) * p - 2 * beta * (beta + 1) * g ** 2 * dists * res ** (-beta - 2) ) k_pi = score_x.mm(score_y.T) * kxy + dkxy + d2kxy if return_kernel: return k_pi, kxy return k_pi
08c95b04789f47b557645df975a20c1f1b478a0d
702,091
def build_rgb_and_opacity(s): """ Given a KML color string, return an equivalent RGB hex color string and an opacity float rounded to 2 decimal places. EXAMPLE:: >>> build_rgb_and_opacity('ee001122') ('#221100', 0.93) """ # Set defaults color = '000000' opacity = 1 if s.startswith('#'): s = s[1:] if len(s) == 8: color = s[6:8] + s[4:6] + s[2:4] opacity = round(int(s[0:2], 16)/256, 2) elif len(s) == 6: color = s[4:6] + s[2:4] + s[0:2] elif len(s) == 3: color = s[::-1] return '#' + color, opacity
06cb729338584c9b3b934a844f5a2ec53245e967
702,092
import aiohttp async def shorten(long_url: str, api_base: str, api_key: str): """ Creates a short url if valid. """ params = { 'url': long_url, 'key': api_key, 'response_type': 'json' } async with aiohttp.ClientSession() as sess: async with sess.get(api_base + '/api/v2/action/shorten', params=params) as r: data = await r.json() action = data.get('action') short_url = data.get('result') if action == 'shorten' and short_url is not None: return short_url
14bad17b0b39ab09526269ec105086e2ac497f4a
702,093
def url_add_api_key(url_dict: dict, api_key: str) -> str: """Attaches the api key to a given url Args: url_dict: Dict with the request url and its relevant metadata. api_key: User's API key provided by US Census. Returns: URL with attached API key information. """ return url_dict['url']+f'&key={api_key}'
1442d0f67a1f3603205870d1af0baf30eb3f1d50
702,095
from pathlib import Path def config_file_path(token_file: str) -> Path: """Provide Path to config file""" if token_file is None: return Path.joinpath(Path.home(), ".rmapi") else: return Path(token_file)
8f31d64bcb720999080a9bf61b3f174d6882141c
702,096
def _compute_fans(shape): """Computes the fan-in and fan-out for a depthwise convolution's kernel.""" if len(shape) != 4: raise ValueError( 'DepthwiseVarianceScaling() is only supported for the rank-4 kernels ' 'of 2D depthwise convolutions. Bad kernel shape: {}' .format(str(shape))) receptive_field_size = shape[0] * shape[1] depth_multiplier = shape[3] fan_in = receptive_field_size fan_out = receptive_field_size * depth_multiplier return (fan_in, fan_out)
a33bfdf32080147f092d32fca1d70a90b2b25e91
702,097
def merge_shards(shard_data, existing): """ Compares ``shard_data`` with ``existing`` and updates ``shard_data`` with any items of ``existing`` that take precedence over the corresponding item in ``shard_data``. :param shard_data: a dict representation of shard range that may be modified by this method. :param existing: a dict representation of shard range. :returns: True if ``shard data`` has any item(s) that are considered to take precedence over the corresponding item in ``existing`` """ if not existing: return True if existing['timestamp'] < shard_data['timestamp']: # note that currently we do not roll forward any meta or state from # an item that was created at older time, newer created time trumps return True elif existing['timestamp'] > shard_data['timestamp']: return False new_content = False # timestamp must be the same, so preserve existing range bounds and deleted for k in ('lower', 'upper', 'deleted'): shard_data[k] = existing[k] # now we need to look for meta data updates if existing['meta_timestamp'] >= shard_data['meta_timestamp']: for k in ('object_count', 'bytes_used', 'meta_timestamp'): shard_data[k] = existing[k] else: new_content = True if (existing['state_timestamp'] == shard_data['state_timestamp'] and shard_data['state'] > existing['state']): new_content = True elif existing['state_timestamp'] >= shard_data['state_timestamp']: for k in ('state', 'state_timestamp', 'epoch'): shard_data[k] = existing[k] else: new_content = True return new_content
18704dd79274dd7ec6157cd28be04a5858e6cff7
702,098
def inFileDict(inFileList): """ generate a nested dictionary of the input files organized by sample and barcode in the format: dict[sample][barcodeGroup][dataType]=fileName """ outDict = {} for f in inFileList: sample = f.split('_')[-3].split('/')[-1] barcodes = f.split('_')[-2] dType = f.split('_')[-1].split('.')[0] if sample not in outDict.keys(): outDict[sample] = {} if barcodes not in outDict[sample].keys(): outDict[sample][barcodes] = {} outDict[sample][barcodes][dType] = f return outDict
c67183b85890128b3b1728d15f7a45532ee490a3
702,099
def usage_percent(used, total, round_=None): """Calculate percentage usage of 'used' against 'total'.""" try: ret = (float(used) / total) * 100 except ZeroDivisionError: return 0.0 else: if round_ is not None: ret = round(ret, round_) return ret
dd707700de52020102ad51ad6f8494d0db489463
702,101
import re def is_valid_regex(string): """ Checks whether the re module can compile the given regular expression. :param string: str :return: boolean """ try: re.compile(string) is_valid = True except re.error: is_valid = False return is_valid
3893410afd8d3e6ed9310550159b35cc504dfffa
702,102
import logging def format_mulenc_args(args): """Format args for multi-encoder setup. It deals with following situations: (when args.num_encs=2): 1. args.elayers = None -> args.elayers = [4, 4]; 2. args.elayers = 4 -> args.elayers = [4, 4]; 3. args.elayers = [4, 4, 4] -> args.elayers = [4, 4]. """ # default values when None is assigned. default_dict = { "etype": "blstmp", "elayers": 4, "eunits": 300, "subsample": "1", "dropout_rate": 0.0, "atype": "dot", "adim": 320, "awin": 5, "aheads": 4, "aconv_chans": -1, "aconv_filts": 100, } for k in default_dict.keys(): if isinstance(vars(args)[k], list): if len(vars(args)[k]) != args.num_encs: logging.warning( "Length mismatch {}: Convert {} to {}.".format( k, vars(args)[k], vars(args)[k][: args.num_encs] ) ) vars(args)[k] = vars(args)[k][: args.num_encs] else: if not vars(args)[k]: # assign default value if it is None vars(args)[k] = default_dict[k] logging.warning( "{} is not specified, use default value {}.".format( k, default_dict[k] ) ) # duplicate logging.warning( "Type mismatch {}: Convert {} to {}.".format( k, vars(args)[k], [vars(args)[k] for _ in range(args.num_encs)] ) ) vars(args)[k] = [vars(args)[k] for _ in range(args.num_encs)] return args
fbecf63e660ba0756e86a4334de95e7ae5f6736c
702,103
def parse_cluster_pubsub_numsub(res, **options): """ Result callback, handles different return types switchable by the `aggregate` flag. """ aggregate = options.get('aggregate', True) if not aggregate: return res numsub_d = {} for _, numsub_tups in res.items(): for channel, numsubbed in numsub_tups: try: numsub_d[channel] += numsubbed except KeyError: numsub_d[channel] = numsubbed ret_numsub = [] for channel, numsub in numsub_d.items(): ret_numsub.append((channel, numsub)) return ret_numsub
0e499f8508b0f5507fa0b2c418d0a253d35a32f5
702,104
def convert_aux_to_base(new_aux: float, close: float): """converts the aux coin to the base coin Parameters ---------- new_aux, the last amount maintained by the backtest close, the closing price of the coin Returns ------- float, amount of the last aux multiplied by the closing price """ if new_aux: return round(new_aux * close, 8) return 0.0
f76324e0a61a58a926d3f4cadf60315692d35fee
702,105
import calendar def year_add(date, years): """Add number of years to date. >>> import datetime >>> year_add(datetime.datetime(2016, 2, 29), 1) datetime.datetime(2017, 2, 28, 0, 0) >>> year_add(datetime.date(2016, 2, 29), 1) datetime.date(2017, 2, 28) >>> year_add(datetime.date(2015, 2, 28), 1) datetime.date(2016, 2, 28) >>> year_add(datetime.date(2017, 2, 28), -1) datetime.date(2016, 2, 28) >>> year_add(datetime.datetime(2016, 2, 29), -1) datetime.datetime(2015, 2, 28, 0, 0) """ if date.day == 29 and date.month == 2 and not calendar.isleap(date.year + years): return date.replace(day=28, year=date.year + years) return date.replace(year=date.year + years)
62be01b7051ddef27376ebae4b97f63e9b7ca979
702,106
def calc_skier_position(skierposition,userinput): """Return the updated skier position: "j" decrements it by one, "k" increments it by one.""" if userinput == "j": skierposition = skierposition - 1 if userinput == "k": skierposition = skierposition + 1 return skierposition
66f638f9d9b5311efed10bbfe28274a9e80b9dd7
702,108
def _get_command_powershell_script(command): """Return a valid CMD command that runs a powershell script.""" return "powershell -NonInteractive -NoLogo -File {}".format(command)
fdd67ac942e7869417c57f8021f26480228bc0a7
702,109
def _getProductPath(product_name): """ Return the absolute path of the product's directory. """ try: # BBB: for GenericSetup 1.1 style product names product = __import__('Products.%s' % product_name, globals(), {}, ['initialize']) except ImportError: try: product = __import__(product_name, globals(), {}, ['initialize']) except ImportError: raise ValueError('Not a valid product name: %s' % product_name) return product.__path__[0]
e269c1d261bb60c92f3c83fce66e6168be486e84
702,110
def getModelSupportTypes(data): """ Get the categories supported by the model :return: """ temp = '' for i in data: temp = temp + ' ' + i return temp
b44a362ca231c65eff5aff8d17da9a46236106b6
702,111
import numpy as _np def hausdorff_distance(polyline1,polyline2): """ Compute the hausdorff distance from `polyline1` to `polyline2` :Inputs: `polyline1`: a (k,n1) array for the n1 points of the 1st polyline in k-dimension `polyline2`: a (k,n2) array for the n2 points of the 2nd polyline in k-dimension :Output: The hausdorff distance: max( D(polyline1,polyline2), D(polyline2,polyline1) ) where D(p1,p2) = max_(i in p1) |p1[i] - closest-projection-on-p2| """ p1 = _np.asfarray(polyline1) p2 = _np.asfarray(polyline2) norm = lambda x: (x**2).sum(axis=0)**.5 def max_min_dist(points, polyline): v1 = polyline[:,:-1] # 1st segment vertex, shape (k,n2-1) v2 = polyline[:, 1:] # 2nd segment vertex, shape (k,n2-1) sdir = v2-v1 # direction vector of segment lsl = norm(sdir) # distance between v1 and v2 lsl = _np.maximum(lsl,2**-5) sdir /= lsl # make sdir unit vectors # distance from v1 to the projection of points on segments # disallow projection out of segment: values are in [0,lsl] on_edge = ((points[:,:,None]-v1[:,None,:])*sdir[:,None,:]).sum(axis=0) # (n1,n2-1) on_edge = _np.minimum(_np.maximum(on_edge,0),lsl[None,:]) # points projection on sdir nproj = v1[:,None,:] + on_edge[None,:,:]*sdir[:,None,:] # (k,n1,n2-1) # distance from points to "points projection on sdir" return norm(nproj - points[:,:,None]).min(axis=1).max() return max(max_min_dist(p1,p2), max_min_dist(p2,p1))
7f19bedf1c6d17ff0535f9f6c0b7860532491c7d
702,112
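An illustrative call to hausdorff_distance above, using two hand-made 2-D polylines shaped (k, n); since the helper relies on np.asfarray, a NumPy version that still provides it (< 2.0) is assumed.

    import numpy as np

    # Two parallel 3-point polylines one unit apart; the symmetric Hausdorff distance is 1.0.
    line_a = np.array([[0.0, 1.0, 2.0],
                       [0.0, 0.0, 0.0]])
    line_b = np.array([[0.0, 1.0, 2.0],
                       [1.0, 1.0, 1.0]])
    print(hausdorff_distance(line_a, line_b))  # expected: 1.0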
from pathlib import Path import json def latest_checkpoint(model_dir, model_name): """return path of latest checkpoint in a model_dir Args: model_dir: string, indicate your model dir(save ckpts, summarys, logs, etc). model_name: name of your model. we find ckpts by name Returns: path: None if isn't exist or latest checkpoint path. """ ckpt_info_path = Path(model_dir) / "checkpoints.json" if not ckpt_info_path.is_file(): return None with open(ckpt_info_path, "r") as f: ckpt_dict = json.loads(f.read()) if model_name not in ckpt_dict["latest_ckpt"]: return None latest_ckpt = ckpt_dict["latest_ckpt"][model_name] ckpt_file_name = Path(model_dir) / latest_ckpt if not ckpt_file_name.is_file(): return None return str(ckpt_file_name)
29acafdb72bbb549cda7d72cc15a5a93f5535dca
702,113
def get_token_list(text): """Returns a list of tokens. This function expects that the tokens in the text are separated by space character(s). Example: "ca n't , touch". This is the case at least for the public DiscoFuse and WikiSplit datasets. Args: text: String to be split into tokens. """ return text.split()
01a917fae5923cdfd693548bb688695a917fab70
702,115
def cut_string(string, limit=30): """Shorten the length of longer strings.""" if len(string) <= limit: return string else: return string[:limit-3] + '...'
842cfefcff84c4f146cc85a4e86dff1486e9a434
702,116
def heading(heading_string, underline='='): """ Takes a raw string and underlines it with the given underline char """ return '%s\n%s' % (heading_string, underline * len(heading_string))
369385ffef60b88ba7e3a5c376236f6d4043ac72
702,117
def composed_measurement(ec_measurement): """Fixture that returns a composed measurement""" measurement1 = ec_measurement.select(cycle=1) measurement2 = ec_measurement.select(cycle=3) return measurement1 + measurement2
f7aa5891b7a9f88e2d18723cd526ff48044d0fd8
702,118
def AverageOverlap(l1, l2, depth = 10): """Calculates Average Overlap score. l1 -- Ranked List 1 l2 -- Ranked List 2 depth -- depth @author: Ritesh Agrawal @Date: 13 Feb 2013 @Description: This is an implementation of average overlap measure for comparing two score (Refererence: http://www.umiacs.umd.edu/~wew/papers/wmz10_tois.pdf). This is a modified implementation of https://github.com/maslinych/linis-scripts/blob/master/rbo_calc.py It is a linear implementation of the RBO and assumes there are no duplicates and doesn't handle for ties. """ if l1 == None: l1 = [] if l2 == None: l2 = [] sl, ll = sorted([(len(l1), l1),(len(l2),l2)]) s, S = sl # s = length of smaller list, S = Smaller List l, L = ll # l = length of longer list, L = Longer list #sanity check if s == 0: return 0 depth = depth if depth < l else l # Calculate fraction of overlap from rank at ranks 1 through depth # (the longer of the two lists) ss = set([]) ls = set([]) overlap = {0: 0} # overlap holds number of common elements at depth d sum1 = 0.0 for i in range(depth): # get elements from the two list x = L[i] y = S[i] if i < s else None depth = i+1 # if the two elements are same, then we don't need # to them to the list and just increment the if x == y: overlap[depth] = overlap[i] + 2 #else add items to the two list else: ls.add(x) if y != None: ss.add(y) overlap[depth] = overlap[i] + (2 if x in ss else 0) + (2 if y in ls else 0) sum1 = sum1 + float(overlap[depth])/(len(S[0:depth]) + depth) return sum1/depth
9cec7fcf500ae6e59eb44e6bad4a9b5c376f87c3
702,119
def three_shouts(word1, word2, word3): """Returns a tuple of strings concatenated with '!!!'.""" # Define inner def inner(word): """Returns a string concatenated with '!!!'.""" return word + '!!!' # Return a tuple of strings return (inner(word1), inner(word2),inner(word3))
d7986646a48fcdd3448d834d59ce497c292a984d
702,120
def add_custom_header(res): """Add custom headers to the response.""" res.headers["Cache-Control"] = "no-cache, no-store, must-revalidate" res.headers["Expires"] = "0" res.headers["Server"] = "Roppo-JSON" return res
7ed261a4aa9d4aa532bd2c07544a4ffe4ec1cafb
702,121
def profile_to_node(src_profile): """convert source profile to graph node.""" return (src_profile['uid'], src_profile)
970d349c2884dd57d10bef8f7e2649509e480a62
702,122
import base64 def compile_program(client, code): """This function helps to compile our source code Args: client: [description] code: source code Returns: Encoded compiled code """ compiler_response = client.compile(code) return base64.b64decode(compiler_response["result"])
e666a420b0c2b96d46d6b096e1fb3f2e570dee87
702,123
import argparse def parse_args(): """parse custom arguments and set default value""" parser = argparse.ArgumentParser( description="Trim spaces at the end of every lines." ) parser.add_argument("-R", "-r", action="store_true", help="Whether to recursive") parser.add_argument("-y", "--yes", action="store_true", help="No ask") parser.add_argument( "-t", "--type", default="*", help="Filter file type(Example: *.py)" ) parser.add_argument("-d", "--dir", default="", help="The directory path") parser.add_argument( "files", nargs="+", default=[], metavar="*.py", help="files or directories", ) return parser.parse_args()
8aff3fba3f9c5af0e98d938c544d1dde08977086
702,124
def mult(v1, m): """multiplies a vector""" return (v1[0]*m,v1[1]*m)
5055a89c9e3175d103071c09a4553cc6b1528bae
702,125
def is_whitelist_violation(rules, policy): """Checks if the policy is not a subset of those allowed by the rules. Args: rules (list): A list of FirewallRule that the policy must be a subset of. policy (FirewallRule): A FirewallRule. Returns: bool: True if the policy is not a subset of any of the allowed rules (a violation), False otherwise. """ policy_subset_check = [] for rule in rules: if policy < rule: policy_subset_check.append(True) else: policy_subset_check.append(False) result = not any(policy_subset_check) return result
00320174323d0827a201944a11a24be0bf0ce204
702,127
def cria_peca(peca): """ cria_peca: str -> peca Takes a player identifier (or a free piece) and returns a dictionary that corresponds to the internal representation of the piece. R[peca] -> {'peca': peca} """ if type(peca) != str or len(peca) != 1 or peca not in 'XO ': raise ValueError('cria_peca: argumento invalido') return {'peca': peca}
6a74212f49695addab80c68f41a1e7e7d45e1ed6
702,128
def basic(s, coeffs): """Performs the "standard" de Casteljau algorithm.""" r = 1 - s degree = len(coeffs) - 1 pk = list(coeffs) for k in range(degree): new_pk = [] for j in range(degree - k): new_pk.append(r * pk[j] + s * pk[j + 1]) # Update the "current" values. pk = new_pk return pk[0]
cd12b21a0b35752b67f26eba10ee54650d45c49d
702,129
def matchnocase(word, vocab): """ Match a word to a vocabulary while ignoring case :param word: Word to try to match :param vocab: Valid vocabulary :return: >>> matchnocase('mary', {'Alice', 'Bob', 'Mary'}) 'Mary' """ lword = word.lower() listvocab = list(vocab) # this trick catches dict and set in addition to list lvocab = [w.lower() for w in listvocab] if lword in lvocab: return listvocab[lvocab.index(lword)] return None
ba0354d7669d08fbdedc926c11f446c26f401e89
702,130
from datetime import datetime def get_first_timestamp(log_file, search_text): """Get the first timestamp of `search_text' in the log_file Args: log_file search_text (str) Returns: timestamp: datetime object """ timestamp = None with open(log_file, "r") as f: content = [next(f) for x in range(51)] content = [x.strip().split() for x in content] for log_line in content: # Extract text text = ' '.join(log_line[3:-1]) if search_text in text.lower(): # Extract date for this text date_time_str = ' '.join(log_line[0:2])[1:-1] timestamp = datetime.strptime(date_time_str, '%Y-%m-%d %H:%M:%S.%f') break return timestamp
fbf5f00ea0810788019ec081a67664393763a95c
702,131
def get_fetaure_names(df, feature_name_substring) : """ Returns the list of features with name matching 'feature_name_substring' """ return [col_name for col_name in df.columns if col_name.find(feature_name_substring) != -1]
14103620e89b282da026fd9f30c7491b63820c09
702,132
import numbers def ISNUMBER(value): """ Checks whether a value is a number. >>> ISNUMBER(17) True >>> ISNUMBER(-123.123423) True >>> ISNUMBER(False) True >>> ISNUMBER(float('nan')) True >>> ISNUMBER(float('inf')) True >>> ISNUMBER('17') False >>> ISNUMBER(None) False >>> ISNUMBER(datetime.date(2011, 1, 1)) False More tests: >>> ISNUMBER(AltText("text")) False >>> ISNUMBER('') False """ return isinstance(value, numbers.Number)
422c5bcd24a21a50bfefb1a00193387e725d435b
702,133
def api_methods(): """ API symbols that should be available to users upon module import. """ return { 'point', 'scalar', 'scl', 'rnd', 'inv', 'smu', 'pnt', 'bas', 'mul', 'add', 'sub' }
a5f23b48509adb966e10e3309ace93c31651ebd3
702,134
def is_prime(n): """Determine if input number is prime number Args: n(int): input number Return: true or false(bool): """ for curr_num in range(2, n): # if input is evenly divisible by the current number if n % curr_num == 0: # print("current num:", curr_num) return False return True
518a0e78056668e9d8b0a708a05ba9bc9b9cf3d2
702,135
def eval_multiple(exprs,**kwargs): """Given a list of expressions, and keyword arguments that set variable values, returns a list of the evaluations of the expressions. This can leverage common subexpressions in exprs to speed up running times compared to multiple eval() calls. """ for e in exprs: e._clear_eval_cache() return [e._eval([],kwargs) for e in exprs]
2bc90dacb972d3315168638a4ea99f9cfbb13830
702,136
import requests def get(url, params, proxies, headers): """Send a request with the GET method.""" response = requests.get(url, params=params, proxies=proxies, headers=headers) return response
a481a91e5f3fc71f88de8d84efaac3dd666c302e
702,137
def account_ids_equal(account_id_a, account_id_b): """ Compare two account IDs while discarding the account prefix :return: Whether the account IDs are equal :rtype: bool """ return account_id_a[-60:-8] == account_id_b[-60:-8]
4f26d7db93fa06ce834487d1e6876c8fe8602b18
702,138
def action_from_trinary_to_env(action) -> int: """ Maps trinary model output to int action understandable by env """ assert action in (0, 1, 2), f'Wrong action: {action}' return { 0: 0, 1: 2, 2: 5 }[action]
e2a7cd3d6c018a7112e2304f2910781b665a8247
702,139
def ofxPluginPath(): """ nuke.ofxPluginPath() -> String list List of all the directories Nuke searched for OFX plugins in. @return: String list """ return list()
9a8b7b3f69047818e7a3a31e0ff8bca50e57ed4c
702,140
import hashlib def file_md5(file_path: str) -> str: """Compute the md5 hex digest of a binary file with reasonable memory usage.""" md5_hash = hashlib.md5() with open(file_path, 'rb') as f: while True: data = f.read(65536) if not data: break md5_hash.update(data) return md5_hash.hexdigest()
ac0576e901ca3205f824a599566e628ee29f5a7c
702,141
def pl_to_eng(unit: str) -> str: """Converts Polish terminology to English""" switcher = { "pikinier": "spear", "miecznik": "sword", "topornik": "axe", "lucznik": "archer", "zwiadowca": "spy", "lekki kawalerzysta": "light", "lucznik na koniu": "marcher", "ciezki kawalerzysta": "heavy", "taran": "ram", "katapulta": "catapult", "szlachcic": "snob" } return switcher.get(unit, "error")
7d9634c554cc84663b1c7fb787b070d535747d36
702,142
def is_meta_resource_type_mutable(context, meta_resource_type): """Return True if the meta_resource_type is mutable in this context.""" if context.is_admin: return True if context.owner is None: return False # (lakshmiS): resource type can exist without an association with # namespace and resource type cannot be created/update/deleted directly( # they have to be associated/de-associated from namespace) if meta_resource_type.namespace: return meta_resource_type.namespace.owner == context.owner else: return False
54e6e1333c8e3875efd2f1c049eb92962a2fe3e9
702,143
import glob import os def count_files_in_dir(dir_name): """ Count the files in dir_name (used after do_activity to check the directory contains the expected zip) """ file_names = glob.glob(dir_name + os.sep + "*") return len(file_names)
6d53c430c55a0b8dbf98df0845b3e742639218d2
702,144
import requests import re def proc_imgur_url(starturl): """ Process an imgur link """ # If imgur is not in the link, skip it if "imgur.com" not in starturl: return starturl finishedurl = [] regex = r"href\=\"https://i\.imgur\.com\/([\d\w]*)(\.jpg|\.png|\.gif|\.mp4|\.gifv)" try: imgurHTML = requests.get(starturl) except: raise Exception('Something failed') # Try finding all the embedded imgur links imgurhash = re.findall(regex, imgurHTML.text) # If no embedded links have been found, return the original url if len(imgurhash) == 0: return starturl finishedurl.append( 'https://i.imgur.com/{0}{1}'.format(imgurhash[0][0], imgurhash[0][1])) return finishedurl
0a026c2473ebfd827439049c1a50e5678c26a638
702,146
from typing import Dict from typing import List from pathlib import Path import yaml def write_commands_instructions( commands_instructions: Dict[str, List[str]], scene_path: Path, index: int ) -> Path: """*Deprecated, use `write_yaml_instructions` instead.* Writes a command instruction `yaml` file. These are the files that [`runner`](github.com/TrickyTroll/good-bot-runner) takes as input to type commands and expect stuff. Args: commands_instructions (Dict[str, List[str]]): A dictionary of commands and things to expect. Keys should be either `commands` or `expect`. The values should be a list of commands and a list of things to expect. scene_path (Path): The path towards the scene where the `commands_instructions` come from. index (int): The index of the command block. Returns: Path: The path towards where the new `yaml` file has been written. """ commands_path: Path = scene_path / Path("commands") file_path: Path = commands_path / Path(f"commands_{index + 1}").with_suffix(".yaml") to_write: str = yaml.safe_dump(commands_instructions) with open(file_path, "w") as stream: stream.write(to_write) return file_path
1a0cc75b9bcf269133b799f1abe4b507b2f42c16
702,147
import re def replace_simultaneous(s: str, r0: str, r1: str) -> str: """ Replace all instances of `r0` in `s` with `r1` and vice versa. This method does the replacements simultaneously so that, e.g., if you call `replace_simultaneous("apple banana", "apple", "banana")` the result will be `"banana apple"` instead of either `"apple apple"` or `"banana banana"` for a sequential replacement (depending on the order of the replacements). It is assumed that `r0` is not a substring of `r1` and vice versa. :param s: string to do replacements in :param r0: first string to replace :param r1: second string to replace """ replacement = {r0: r1, r1: r0} r0 = re.escape(r0) r1 = re.escape(r1) return re.sub(f"{r0}|{r1}", lambda x: replacement[x.group()], s)
bcfe2d13fb3b0c3156e866954d6ddf1a0a02e9fc
702,148
def retry_http(response): """Retry on specific HTTP errors: * 429: Rate limited to 50 reqs/minute. Args: response (dict): Dynatrace API response. Returns: bool: True to retry, False otherwise. """ retry_codes = [429] code = int(response.get('error', {}).get('code', 200)) return code in retry_codes
f45f6e9239d78cfa6ad0ec397e3a5b4a58a655f5
702,150
def circular_wetted_perimeter(angle, diameter): """Returns circle wetted perimeter. :param angle: angle in radians from angle_in_partial_filled_pipe function :param diameter: diameter of pipe [m] """ return angle * diameter
e24cc0839eb3bf78e65f6b9b99b34bf87f77b2cf
702,152
import re def openapi_endpoint_name_from_rule(rule): """Utility function to generate the OpenAPI endpoint name. It replaces '/users/<user_id>' with the OpenAPI standard: '/users/{user_id}'. """ name = rule.rule for argument in rule.arguments: openapi_name = f"{{{argument}}}" name = re.sub(fr"<[a-zA-Z:]*{argument}>", openapi_name, name) return name
262ad76302e76017bb88cf469add1c71e0452ea0
702,153
def _fix_axes(tensors, axes, allow_negative): """Makes all axes positive and checks for out of bound errors.""" axes = [ axis + tensor.shape.ndims if axis < 0 else axis for tensor, axis in zip(tensors, axes) ] if not all( ((allow_negative or (not allow_negative and axis >= 0)) and axis < tensor.shape.ndims) for tensor, axis in zip(tensors, axes)): rank_axis_pairs = list( zip([tensor.shape.ndims for tensor in tensors], axes)) raise ValueError( 'Some axes are out of bounds. Given rank-axes pairs: {}'.format( [pair for pair in rank_axis_pairs])) return axes
f3861809c2150e3d069fdba6930ef8a1e7f870da
702,154
def parse_filepaths(fp, studies=None): """ Summary ------- Function to parse filepath and optional study names into a format readable for the calsim_toolkit read/write functions. """ # Check that inputs provided are compatible and zip data into list. if isinstance(fp, str) and (isinstance(studies, str) or not studies): study_fps = [(studies, fp)] elif isinstance(fp, list) and (isinstance(studies, list) or not studies): if studies and (len(fp) != len(studies)): msg = ('List length of file paths `fp` must equal list length of' ' study names.') raise TypeError(msg) if not studies: studies = ['Alt{}'.format(i) for i in range(len(fp))] study_fps = list(zip(studies, fp)) else: msg = 'Inputs provided are incompatible.' raise TypeError(msg) # Return list of tuples. return study_fps
17e6a14967aea1275bf1562ca691dfdb2c1f16d0
702,155
def is_dunder(attr_name: str) -> bool: """ Returns whether the given attr is a magic/dunder method. :param attr_name: name of the attribute to check """ return attr_name.startswith("__") and attr_name.endswith("__")
b3a1f10e9fc7fd5c7dbb930be977a814e6b0c37d
702,156
def regularize_layer_weighted(layers, penalty, tags={'regularizable': True}, **kwargs): """ Computes a regularization cost by applying a penalty to a group of layers, weighted by a coefficient for each layer. Parameters ---------- layers : dict A mapping from : tuple of class:`Layer` instances to coefficients. penalty : callable tags: dict Tag specifications which filter the parameters of the layer or layers. By default, only parameters with the `regularizable` tag are included. Should be defined inside the penalty function **kwargs keyword arguments passed to penalty. Returns ------- Theano scalar a scalar expression for the cost """ return sum(coeff * sum(penalty(layer_tuple, tags, **kwargs)) for layer_tuple, coeff in list(layers.items()))
e4de182b3e99ace5fd41c05d2a318fc865a6dd9c
702,158
def compressibility_drag_wing_total(state,settings,geometry): """Sums compressibility drag for all wings combined Assumptions: None Source: adg.stanford.edu (Stanford AA241 A/B Course Notes) Inputs: state.conditions.aerodynamics.drag_breakdown.compressible[wing.tag].compressibility_drag [Unitless] geometry.wings.areas.reference [m^2] geometry.reference_area [m^2] Outputs: total_compressibility_drag [Unitless] Properties Used: N/A """ # unpack conditions = state.conditions wings = geometry.wings #compute parasite drag total total_compressibility_drag = 0.0 # from wings for wing in wings.values(): compressibility_drag = conditions.aerodynamics.drag_breakdown.compressible[wing.tag].compressibility_drag conditions.aerodynamics.drag_breakdown.compressible[wing.tag].compressibility_drag = compressibility_drag * 1. # avoid linking variables total_compressibility_drag += compressibility_drag conditions.aerodynamics.drag_breakdown.compressible.total = total_compressibility_drag return total_compressibility_drag
ccc69d2d9690d5ce2c7dcd752cfde0a88ae484bb
702,159
def convert_df_2_string(df): """ Convert data frame rows to string output where each new line is defined as \n """ # ititialise string output = 'agent,wkt\n' for i, row in df.iterrows(): if i == len(df) - 1: output += str(row['label']) + ',' + str(row['geometry']) else: output += str(row['label']) + ',' + str(row['geometry']) + '\n' # set environment variable ${AGENTS} # os.environ['AGENTS'] = output return output
c8d2717b72f875f0f4ae743a2cc6c82550221447
702,160
import argparse def parse_args(): """ Parse input arguments """ parser = argparse.ArgumentParser(description='Prepare MCG roidb') parser.add_argument('--input', dest='input_dir', help='folder contain input mcg proposals', default='data/MCG-raw/', type=str) parser.add_argument('--output', dest='output_dir', help='folder contain output roidb', required=True, type=str) parser.add_argument('--gt_roi', dest='roidb', help='roidb', default='data/cache/voc_2012_train_gt_roidb.pkl', type=str) parser.add_argument('--gt_mask', dest='maskdb', help='maskdb', default='data/cache/voc_2012_train_gt_maskdb.pkl', type=str) parser.add_argument('-mask_sz', dest='mask_size', help='compressed mask resolution', default=21, type=int) parser.add_argument('--top_k', dest='top_k', help='number of generated proposal', default=-1, type=int) parser.add_argument('--db', dest='db_name', help='train or validation', default='train', type=str) parser.add_argument('--para_job', dest='para_job', help='launch several process', default='1', type=int) return parser.parse_args()
6291c448f90b5ac32610c2037ad9732f108783f5
702,161
def deletealertrulebasedonip(ip, file_name=None): """ Deletes a Snort rule to alert based on traffic incoming from an ip address. Arguments: ip: String with an ip address. filename: String with a path to a Snort rule file. Passed directly to writerule. """ found = False ipGroup = ip.split(" ") if file_name is not None: with open(file_name, 'r+') as f: #read in entire rule file and split by newline, get rid of empty item at end data = f.read().split("\n")[:-1] #overwrite file from beginning f.seek(0) for line in data: for myIp in ipGroup: if myIp in line: found = True #write down anything that doesn't match ips specified if found == False: f.write(line+'\n') f.truncate() else: found = False return 'Success' else: return 'No File Found'
a08f3b7645372c8e05ab6fbcb3faed6d84ccb0d6
702,162
import operator def get_trans_co(x2ys, n_trans): """Use co-occurrences to compute scores.""" x2ys_co = dict() for x, ys in x2ys.items(): ys = [y for y, cnt in sorted(ys.items(), key=operator.itemgetter(1), reverse=True)[:n_trans]] x2ys_co[x] = ys return x2ys_co
390a5fde3274ec770e4d25becbbba726ea6804b7
702,164
from typing import List from typing import Any import random def shuffle_list(elements: List[Any]) -> List[Any]: """ Shuffle the input list in random order :param elements: List to be reshuffled :return: reshuffled list """ random.shuffle(elements) return elements
17a6520fce91e60f1cfe59d31736c2e5f50ded6f
702,165
import math def pool_output(shape, Kernel, Padding=(0, 0, 0), Stride=(1, 1, 1)): """ Z : depth Y : height X : width P : padding K : kernel """ Z, Y, X = shape Z_out = math.floor(((Z + 2 * Padding[0] - (Kernel[0] - 1) - 1) / Stride[0]) + 1) Y_out = math.floor(((Y + 2 * Padding[1] - (Kernel[1] - 1) - 1) / Stride[1]) + 1) X_out = math.floor(((X + 2 * Padding[2] - (Kernel[2] - 1) - 1) / Stride[2]) + 1) return (Z_out, Y_out, X_out)
3506da0743289733df240b19416e42847f94e3d6
702,166
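A quick worked example for pool_output above (the values are chosen for illustration): an (8, 32, 32) volume pooled with a 2x2x2 kernel at stride 2 and no padding.

    # ((8 - 2) / 2) + 1 = 4 and ((32 - 2) / 2) + 1 = 16 per axis
    print(pool_output((8, 32, 32), Kernel=(2, 2, 2), Stride=(2, 2, 2)))  # expected: (4, 16, 16)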
def merge_sort(array): """ ### Merge sort Implementation of one of the most powerful sorting algorithms.\n Return a sorted array. """ def merge(L, R): res = [] left_ind = right_ind = 0 while left_ind < len(L) and right_ind < len(R): if L[left_ind] < R[right_ind]: res.append(L[left_ind]) left_ind += 1 elif L[left_ind] > R[right_ind]: res.append(R[right_ind]) right_ind += 1 else: res.append(R[right_ind]) res.append(L[left_ind]) left_ind += 1 right_ind += 1 res.extend(L[left_ind:]) res.extend(R[right_ind:]) return res length = len(array) if length <= 1: return array middle_ind = int(length/2) right = merge_sort(array[middle_ind:]) left = merge_sort(array[:middle_ind]) return merge(right, left)
9f2101b0286525490aedce6d6f99962f5e6050f2
702,167
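A small sanity check for merge_sort above (illustrative), including duplicate values to exercise the equal-elements branch:

    print(merge_sort([5, 2, 8, 2, 9, 1]))  # expected: [1, 2, 2, 5, 8, 9]
    print(merge_sort([]))                  # expected: []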
def transform_row_into_list(row): """This function transforms a comma-separated row into a list of values""" result = row.strip('\n').split(',') return result
7944bd2af1c06cbce44de63a14a856a8ce8699f1
702,168
def split_col_row(ref): """Split the letter and number components of a cell reference. Examples: >>> split_col_row('A1') ('A', 1) >>> split_col_row('B100') ('B', 100) >>> split_col_row('CC12') ('CC', 12) """ head = ref.rstrip("0123456789") tail = ref[len(head) :] return head, int(tail)
ae236aa0521564958bd61643fbce932f1b8a2d99
702,169
import re def check_if_string_in_file( file_name: str, string_to_search: str, search_flag: int = 0 ) -> bool: """Check if any line in the file contains given string""" with open(file_name, "r") as read_obj: file_data = read_obj.read() if re.search(string_to_search, file_data, flags=search_flag): return True return False
e47899377daecc7a47adb0f79920a06de15fe8d8
702,170
def handCard(handPos, game): """ Return the value of indexed card in player's hand :param handPos: :param game: :return: """ return game.players[game.whoseTurn].handCards[handPos]
a5556b37f9f220b1f2a42ab95ef8743dac67f735
702,171
def countHostBits(binaryString): """ This will calculate the number of host bits in the mask """ # count the number of 0s in the subnet string return binaryString.count('0')
f86ca0131470e33b3a83350b2b58bc4621a07574
702,172
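For example, a /24 mask written as a 32-character binary string ends in 8 zero bits, so countHostBits above reports 8 host bits:

    print(countHostBits('11111111111111111111111100000000'))  # expected: 8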
def build_model_modified_field_list(queryset): """ Setup the field list for a 'modified' api query, add any Foreign Key field names """ fields = list() fields.append('id') fields.append('modified') if not queryset: return fields try: for field in queryset.model._meta.local_fields: if field.is_relation: fields.append(field.name) except: pass return fields
6a004ec17f03ad7a96ee6ff0f3cf543a3e525465
702,173
def getSiteStore(store): """ Given C{store} find the site store. """ siteStore = store while siteStore.parent: siteStore = siteStore.parent return siteStore
f6ee30355c8f78f12a45b5e58bea8ad96ff6a960
702,174
import gzip def get_file_handle(file_path, compression): """ Returns a file handle to the given path. :param file_path: path to the file to open :param compression: indicates whether or not the input file is compressed :return: a file handle to file_path """ if compression: return gzip.open(file_path, 'rt', encoding='utf-8', errors='strict') else: return open(file_path, 'rt', encoding='utf-8', errors='strict')
44c97b211c4b44679934eede62845c58947c4091
702,175
def solve(firewall): """Return severity if the trip through the firewall. :firewall: list of depth and range of the scanner (separated by a colon) for each layer (separated by newline) :returns: severity of a trip >>> solve('''0: 3 ... 1: 2 ... 4: 4 ... 6: 4''') 24 """ scanners = {int(d) : int(r) for d, r in [line.split(': ') for line in firewall.split('\n')]} severity = 0 for i in range(max(scanners) + 1): try: p = i % (2 * scanners[i] - 2) if p == 0: severity += i * scanners[i] except KeyError: continue return severity
01d8f2af59cfacde1e36f9014ceaa2083ebc0df9
702,176
def _maketrans_c(arg1, arg2, delete=False): """Make a complement tr table for the 'c' flag. If the 'd' flag is passed, then delete=True. Ranges are expanded in arg1 and arg2 but arg2 is not otherwise normalized""" t = str.maketrans(arg1, arg1) d = dict() for i in range(257): if i not in t: if not arg2: if delete: d[i] = None else: d[i] = i elif i < len(arg2): d[i] = arg2[i] elif delete: d[i] = None else: d[i] = arg2[-1] return str.maketrans(d)
75cbc9c0d4df62736c4ae9d04f940d4e7104d77a
702,177
def select(con, id): """ SELECT the data for the given key """ cur = con.execute( 'select id, title_en, title_ja, description_en, description_ja, author, created from suggestions where id=?', (id,)) return cur.fetchone()
64f7f96c04dc533446835f6b6d06f7cc4530285e
702,178
def save_params(net, best_metric, current_metric, epoch, save_interval, prefix): """Logic for if/when to save/checkpoint model parameters""" if current_metric < best_metric: best_metric = current_metric net.save_parameters('{:s}_best.params'.format(prefix, epoch, current_metric)) with open(prefix+'_best.log', 'a') as f: f.write('\n{:04d}:\t{:.4f}'.format(epoch, current_metric)) if save_interval and (epoch + 1) % save_interval == 0: net.save_parameters('{:s}_{:04d}_{:.4f}.params'.format(prefix, epoch, current_metric)) return best_metric
9af251965a4facc598c9a3d833f967511cc7b0ec
702,179
import os import json def load_map(indexing_dir): """Load 1. the mapping from channel id to the idx sequence of its content videos, and 2. the mapping from video id to idx""" res = [] file_list = ["cid2vidx.json", "vid2idx.json"] for file in file_list: file_path = os.path.join(indexing_dir, file) with open(file_path, "r", encoding="utf8") as fp: res.append(json.load(fp)) return res
ec9eb9fc379e35c76d175567ad3408f8b0d893fc
702,180
def convert_pyte_buffer_to_colormap(buffer, lines): """ Convert a pyte buffer to a simple colors """ color_map = {} for line_index in lines: # There may be lines outside the buffer after terminal was resized. # These are considered blank. if line_index > len(buffer) - 1: break # Get line and process all colors on that. If there are multiple # continuous fields with same color we want to combine them for # optimization and because it looks better when rendered in ST3. line = buffer[line_index] line_len = len(line) if line_len == 0: continue # Initialize vars to keep track of continuous colors last_bg = line[0].bg if last_bg == "default": last_bg = "black" last_fg = line[0].fg if last_fg == "default": last_fg = "white" if line[0].reverse: last_color = (last_fg, last_bg) else: last_color = (last_bg, last_fg) last_index = 0 field_length = 0 char_index = 0 for char in line: # Default bg is black if char.bg is "default": bg = "black" else: bg = char.bg # Default fg is white if char.fg is "default": fg = "white" else: fg = char.fg if char.reverse: color = (fg, bg) else: color = (bg, fg) if last_color == color: field_length = field_length + 1 else: color_dict = {"color": last_color, "field_length": field_length} if last_color != ("black", "white"): if line_index not in color_map: color_map[line_index] = {} color_map[line_index][last_index] = color_dict last_color = color last_index = char_index field_length = 1 # Check if last color was active to the end of screen if last_color != ("black", "white"): color_dict = {"color": last_color, "field_length": field_length} if line_index not in color_map: color_map[line_index] = {} color_map[line_index][last_index] = color_dict char_index = char_index + 1 return color_map
d16e8aeeb327bfa75af3ba76d339c0a2538dcfa7
702,181
import random def read_meminfo(): """ Mocks read_meminfo as this is a Linux-specific operation. """ return { "MemTotal": random.randint(0, 999999999), "MemFree": random.randint(0, 999999999), "MemAvailable": random.randint(0, 999999999), "HugePages_Total": random.randint(0, 999999999), }
6bdf66ded424748736875d70eae4b54d4a820c28
702,183
def str_to_vec(sequences): """converts nucleotide strings into vectors using a 2-bit encoding scheme.""" vecs = [] nuc2bit = {"A": (0, 0), "C": (0, 1), "T": (1, 0), "G": (1, 1)} for seq in sequences: vec = [] for nuc in seq: vec.append(nuc2bit[nuc][0]) vec.append(nuc2bit[nuc][1]) vecs.append(vec) return vecs
952e35253c275ef4424410b024338da1a11b20e7
702,184
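A one-line check of the 2-bit encoding used by str_to_vec above: A -> 00, C -> 01, G -> 11.

    print(str_to_vec(["ACG"]))  # expected: [[0, 0, 0, 1, 1, 1]]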
import operator def vector_add(a, b): """Component-wise addition of two vectors. >>> vector_add((0, 1), (8, 9)) (8, 10) """ return tuple(map(operator.add, a, b))
2144a02128ffa8712cfb998045ede1ca9308650f
702,185
import json def get_aws_key_and_secret(aws_creds_file_path): """ Given a filename containing AWS credentials (see README.md), return a 2-tuple (access key, secret key). """ with open(aws_creds_file_path, 'r') as f: creds_dict = json.load(f) return creds_dict['accessKeyId'], creds_dict['secretAccessKey']
b3eae6ee0283a7245d37f92b5a5f4ef1102e248d
702,186
def probability(df, features): """ Calculates the occurence probability of all the categories for every feature. Parameters ---------- df : panda dataframe the dataset of the population features : dictionary a dictionary of features with keys as feature name and values as objects of feature class. Returns ------- features : dictionary a dictionary of features with keys as feature name and values as objects of feature class with updated probability values. """ for feat in features.keys(): series = df[feat].value_counts(normalize=True,sort=False) for cat in series.index: features[feat].categories[cat].prob = series[cat] return features
7f46f2ec0fa69b22fea0a2b4a0e9ffc691630b1d
702,187
def freq_id_to_stream_id(f_id): """ Convert a frequency ID to a stream ID. """ pre_encode = (0, (f_id % 16), (f_id // 16), (f_id // 256)) stream_id = ( (pre_encode[0] & 0xF) + ((pre_encode[1] & 0xF) << 4) + ((pre_encode[2] & 0xF) << 8) + ((pre_encode[3] & 0xF) << 12) ) return stream_id
f89d52adf4390f665e069c2b5f4f5accc22709b8
702,189
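A worked example for freq_id_to_stream_id above (the input value 37 is arbitrary): 37 % 16 = 5 fills bits 4-7 and 37 // 16 = 2 fills bits 8-11, giving 0x250.

    sid = freq_id_to_stream_id(37)
    print(sid, hex(sid))  # expected: 592 0x250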
import os def get_all_folders(path): """ :param path: :return: """ return [path + '/' + i for i in os.listdir(path)]
ba944c8d2bec3450fdf4fb1fc7b60e7675ddce7a
702,190
import dateutil.parser def get_entry_end_date(e): """Returns end date for entry""" return dateutil.parser.parse(e['time_end'])
a9b8bdae873de0ef97de49e342cd4f3bbd8117f6
702,191
from pathlib import Path def get_launch_agents_dir() -> Path: """Returns user LaunchAgents directory.""" launch_agents_dir = Path.home() / "Library" / "LaunchAgents" assert launch_agents_dir.is_dir() return launch_agents_dir
d11bc00c986bd5440549e71fdae9ed1f98f18d21
702,192
def yddot_d_z(mu, state, r_15_inv, r_25_inv): """ Partial of y acceleration with respect to z Args: mu (float): three body constant state (np.array): 6 dimensional state vector of (x, y, z, dx, dy, dz) r_15_inv (float): 1 / norm(r_1)^(5) where r_1 is the vector from the primary (i.e. Earth) to the satellite r_25_inv (float): 1 / norm(r_2)^(5) where r_2 is the vector from the secondary (i.e. Moon) to the satellite Returns: float """ x, y, z = state[:3] ans = 3 * mu * y * z * r_25_inv \ + 3 * (1 - mu) * y * z * r_15_inv return ans
18281dc5dffdef99e38c33e28cc26a0e9d9aa262
702,193
def decypher(text): """ Decypher file name into descriptive label for legend """ # name shortcuts help_dict = {"h": "Target/Hunt AI", "p": "Probabilistic AI", "r": "Random AI"} final = "" t_split = text.split("-") final += help_dict[t_split[0]] # hunt/target AI branch if t_split[0] == "h": if t_split[1] == "True": final += ", parity" if t_split[2] == "True": final += ", cheating" final += "(knows "+t_split[3]+" ships)" # probabilistic AI branch if t_split[0] == "p": if t_split[1] == "True": final += ", cheating" final += "(knows "+t_split[2]+" ships)" return final
bf4db039bcc86d8d874a29dbf301cc91fa461560
702,194
import re def compute_rq_type(oslevel, empty_list): """Compute rq_type. return: Latest when oslevel is blank or latest (not case sensitive) Latest when oslevel is a TL (6 digits) and target list is empty TL when oslevel is xxxx-xx(-00-0000) SP when oslevel is xxxx-xx-xx(-xxxx) ERROR when oslevel is not recognized """ if oslevel is None or not oslevel.strip() or oslevel.upper() == 'LATEST': return 'Latest' if re.match(r"^([0-9]{4}-[0-9]{2})$", oslevel) and empty_list: return 'Latest' if re.match(r"^([0-9]{4}-[0-9]{2})(|-00|-00-0000)$", oslevel): return 'TL' if re.match(r"^([0-9]{4}-[0-9]{2}-[0-9]{2})(|-[0-9]{4})$", oslevel): return 'SP' return 'ERROR'
753e54d4858a8d1248958c15bbd6b1a0cbc9b02e
702,196
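A few illustrative calls to compute_rq_type above; the oslevel strings are made up, and empty_list is truthy when the target list is empty.

    print(compute_rq_type(None, []))             # 'Latest'
    print(compute_rq_type('7100-02', True))      # 'Latest' (TL requested with an empty target list)
    print(compute_rq_type('7100-02-03', False))  # 'SP'
    print(compute_rq_type('foo', False))         # 'ERROR'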
import re def convert_input_paths(argo_json): """ argo aggregation is not valid json as properties are not enclosed in quotes: flow/step/[{task-id:flow-step-3119439657},{task-id:flow-step-195521861},{task-id:flow-step-3020891073}] Parameters ---------- argo_json Returns ------- list of task-ids to be consumed by metaflow join step: flow/step/:flow-step-3119439657,flow-step-195521861,flow-step-3020891073 """ flow, run_id, task_ids = argo_json.split('/') task_ids = re.sub('[\[\]{}]', '', task_ids) task_ids = task_ids.split(',') tasks = [t.split(":")[1] for t in task_ids] return '{}/{}/:{}'.format(flow, run_id, ','.join(tasks))
2a1e5ddd378546343d5532f304145cb2157244b5
702,197