Columns: content (string, 35 to 416k chars), sha1 (string, 40 chars), id (int64, 0 to 710k)
import os def _format_echo(text): """Compose a system echo command that outputs the given text.""" quote = '' if os.name == 'nt' else '"' return 'echo {}{}{}'.format(quote, text, quote)
65dde7b473b618a957c3eddd4bb205df5d9cb674
709,460
def target_frame(): """Input frame.""" return 'IAU_ENCELADUS'
8c57ab924a7b4471ac2f549493ebc176e853c652
709,461
def get_identifier(positioner_id, command_id, uid=0, response_code=0): """Return a 29-bit identifier with the correct format. The CAN identifier format for the positioners uses an extended frame with 29-bit encoding so that the 11 higher bits correspond to the positioner ID, the 8 middle bits are the command number, the following 6 bits are the unique identifier, and the 4 lower bits are the response code. Parameters ---------- positioner_id : int The ID of the positioner to command, or zero for broadcast. command_id : int The ID of the command to send. uid : int The unique identifier. response_code : int The response code. Returns ------- identifier : `int` The decimal integer corresponding to the 29-bit identifier. Examples -------- :: >>> get_identifier(5, 17, uid=5) 1328208 >>> bin(1328208) '0b101000100010001010000' """ posid_bin = format(positioner_id, "011b") cid_bin = format(command_id, "08b") cuid_bin = format(uid, "06b") response_bin = format(int(response_code), "04b") identifier = posid_bin + cid_bin + cuid_bin + response_bin assert len(identifier) == 29 return int(identifier, 2)
57a1ce7004186e8c1c88c06665311e71010705c4
709,462
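A minimal sketch of how the 11/8/6/4-bit layout described above packs into one integer; the shift arithmetic mirrors get_identifier's string concatenation and the values are the ones from its doctest:
identifier = (5 << 18) | (17 << 10) | (5 << 4) | 0
assert identifier == 1328208 == get_identifier(5, 17, uid=5)
# Unpacking recovers the individual fields.
assert identifier >> 18 == 5            # positioner ID (11 bits)
assert (identifier >> 10) & 0xFF == 17  # command ID (8 bits)
assert (identifier >> 4) & 0x3F == 5    # unique identifier (6 bits)
assert identifier & 0xF == 0            # response code (4 bits)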
def standardized(array): """Normalize the values in an array. Arguments: array (np.ndarray): Array of values to normalize. Returns: array with zero mean and unit standard deviation. """ return (array - array.mean()) / max(1e-4, array.std())
1764dfd1e4e173d2ca081edeb8b7165a79d63b7d
709,463
def get_file_name(): """Ask the user for a file name and return it.""" f_name = input('Input your file name: ') return f_name
5d3e524ebe423410f721afb070bfba9d804ed19f
709,464
import requests def deletecall(bam_url,api_call,call_parameters,delete_entity,header): """API request to delete and return values""" call_url = "http://"+bam_url+"/Services/REST/v1/"+api_call+"?" print("You are requesting to delete:") print(delete_entity) answer = input("Do you want to proceed (y (yes) or n (no))? ") try: if answer.lower() == "y": response = requests.delete(call_url,params=call_parameters, headers=header) return response.json() elif answer.lower() == "n": return "You aborted deletion" else: return "You entered an invalid character" except requests.exceptions.RequestException as e: print(e)
f6cffd225b9dd8d4d387b472d5ef522e2a48d738
709,465
def get_divmod(up, down, minute=False, limit=2): """ Return the quotient of up divided by down. :param up: dividend :param down: divisor :param minute: convert the result to minutes :param limit: number of decimal places to keep :return: quotient """ if up == 0: return 0 if down == 0: return 0 if minute: return round(up/down/60.0, limit) return round(float(up)/down, limit)
253304cde82fd4a3aa70737f4caabb20b5166349
709,467
def update_with_error(a, b, path=None): """Merges `b` into `a` like dict.update; however, raises KeyError if values of a key shared by `a` and `b` conflict. Adapted from: https://stackoverflow.com/a/7205107 """ if path is None: path = [] for key in b: if key in a: if isinstance(a[key], dict) and isinstance(b[key], dict): update_with_error(a[key], b[key], path + [str(key)]) elif a[key] == b[key]: pass # same leaf value elif a[key] is None: a[key] = b[key] elif (isinstance(a[key], (list, tuple)) and not isinstance(a[key], str) and isinstance(b[key], (list, tuple)) and not isinstance(b[key], str) and len(a[key]) == len(b[key]) and all((av is None or av == bv) for av, bv in zip(a[key], b[key]))): # yapf: disable a[key] = b[key] else: raise KeyError('Conflict at {}: {} vs. {}'.format('.'.join(path + [str(key)]), a[key], b[key])) else: a[key] = b[key] return a
201650bba4fcae21d353f88ff22a9559aea61ff4
709,468
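A short usage sketch for update_with_error above; the dictionaries are invented for illustration:
base = {"a": 1, "b": {"x": 1}}
update_with_error(base, {"b": {"y": 2}, "c": 3})
assert base == {"a": 1, "b": {"x": 1, "y": 2}, "c": 3}
try:
    update_with_error({"a": 1}, {"a": 2})  # conflicting leaf values
except KeyError as exc:
    print(exc)  # 'Conflict at a: 1 vs. 2'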
def tree_unflatten(flat, tree, copy_from_tree=None): """Unflatten a list into a tree given the tree shape as second argument. Args: flat: a flat list of elements to be assembled into a tree. tree: a tree with the structure we want to have in the new tree. copy_from_tree: optional list of elements that we just copy from tree. This argument is used when the flat version does not contain all elements of the expected tree but just a subset, while the rest are filled from the tree itself. It allows to omit "unnecessary" elements. For example, consider trees (A, (B, X), X) and (X, (A, X), B) where X is some element we do not care about. Flattening the first tree and removing X will yield a flat list [A, B] and the second tree can then be reconstructed from this list and the tree (X, (E, X), E) with copy_from_tree=[X]. One example where this is used is the weights-tree of a model, where layers with no weights have () in the tree and we use copy_from_tree=[()] to restore a model from a file that only has a list of trainable weights. Returns: A pair (new_tree, rest_of_flat) where the new tree that has the structure of tree but with leaves from flat, and the remaining elements of flat if more were provided than the number of leaves of tree (useful for recursion). """ if copy_from_tree is not None and tree in copy_from_tree: return tree, flat if isinstance(tree, (list, tuple)): new_tree, rest = [], flat for t in tree: new_t, rest = tree_unflatten(rest, t, copy_from_tree=copy_from_tree) new_tree.append(new_t) new_tree = tuple(new_tree) if isinstance(tree, tuple) else new_tree return new_tree, rest if isinstance(tree, dict): new_tree, rest = {}, flat for k in tree: new_v, rest = tree_unflatten(rest, tree[k], copy_from_tree=copy_from_tree) new_tree[k] = new_v return new_tree, rest return flat[0], flat[1:]
711bc67a20835091360d0fbc64e0a8842eec53ba
709,470
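An illustrative call for tree_unflatten above, including the copy_from_tree case for weightless () leaves; the values are hypothetical:
new_tree, rest = tree_unflatten([1, 2, 3], (0, (0, 0)))
assert new_tree == (1, (2, 3)) and rest == []
# Leaves equal to () are copied from the template tree, not taken from flat.
new_tree, rest = tree_unflatten([1, 2], ((), 0, ((), 0)), copy_from_tree=[()])
assert new_tree == ((), 1, ((), 2)) and rest == []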
def soma_radius(morph): """Get the radius of a morphology's soma.""" return morph.soma.radius
2f9991a2f9240965bdb69a1a14814ed99bf60f86
709,471
def smaller_n(n1, n2): """ Compare two N_Numbers and return the smaller one. """ p1, s1 = n1 p2, s2 = n2 p1l = len(str(p1)) + s1 p2l = len(str(p2)) + s2 if p1l < p2l: return n1 elif p1l > p2l: return n2 p1 = p1.ljust(36, '9') p2 = p2.ljust(36, '9') if p1 <= p2: return n1 else: return n2
1f5922b74bdb8e5ee4dba7a85a9a70efdb024c59
709,472
def sortDict(dictionary: dict): """Return a copy of the dictionary with its keys in sorted order (written as an explicit loop rather than a lambda-based sort).""" sortedDictionary = {} keys = list(dictionary.keys()) keys.sort() for key in keys: sortedDictionary[key] = dictionary[key] return sortedDictionary
ed61adf95f2b8c1c4414f97d84b8863596681478
709,473
def damerau_levenshtein_distance(word1: str, word2: str) -> int: """Calculates the distance between two words.""" inf = len(word1) + len(word2) table = [[inf for _ in range(len(word1) + 2)] for _ in range(len(word2) + 2)] for i in range(1, len(word1) + 2): table[1][i] = i - 1 for i in range(1, len(word2) + 2): table[i][1] = i - 1 da = {} for col, c1 in enumerate(word1, 2): last_row = 0 for row, c2 in enumerate(word2, 2): last_col = da.get(c2, 0) addition = table[row - 1][col] + 1 deletion = table[row][col - 1] + 1 substitution = table[row - 1][col - 1] + (0 if c1 == c2 else 1) transposition = ( table[last_row - 1][last_col - 1] + (col - last_col - 1) + (row - last_row - 1) + 1 ) table[row][col] = min(addition, deletion, substitution, transposition) if c1 == c2: last_row = row da[c1] = col return table[len(word2) + 1][len(word1) + 1]
7b75bb94fe66897c1807ac185d8602ea2b3ebd67
709,474
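A quick sanity check of the distance above; the transposition case is what distinguishes Damerau-Levenshtein from plain Levenshtein:
assert damerau_levenshtein_distance("ab", "ba") == 1           # one transposition
assert damerau_levenshtein_distance("kitten", "sitting") == 3  # substitutions + insertion
assert damerau_levenshtein_distance("", "abc") == 3            # pure insertions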
import json def get_repo_info(main_path): """ Get the info of repo. Args: main_path: the file store location. Return: A json object. """ with open(main_path + '/repo_info.json') as read_file: repo_info = json.load(read_file) return repo_info
f4a538819add0a102f6cbe50be70f2c9a0f969b6
709,475
import yaml def parse_settings(settings_file: str) -> dict: """ The function parses settings file into dict Parameters ---------- settings_file : str File with the model settings, must be in yaml. Returns ------- ydict : dict Parsed settings used for modeling. """ with open(settings_file, 'r') as fstream: ydict = yaml.safe_load(fstream) return ydict
1aec2a8be51376209db81d60115814ddefca7ea6
709,476
def get_mac_address(path): """ input: path to the file containing the mac address output: A string containing a mac address Possible exceptions: FileNotFoundError - when the file is not found PermissionError - in the absence of access rights to the file TypeError - if the function argument is not a string """ if type(path) is not str: raise TypeError("The path must be a string value") with open(path) as file: return file.readline().strip().upper()
814a530b63896103adcb8fbc84d17939644b9bbe
709,477
def jwt_get_username_from_payload_handler(payload): """ Override this function if username is formatted differently in payload """ return payload.get('name')
92d60ce714632571346e93459729dcf1d764617b
709,478
def find_all_movies_shows(pms): # pragma: no cover """ Helper to get all movies and shows on a server. Args: pms: The Plex server instance to query. Returns: List """ all_shows = [] for section in pms.library.sections(): if section.TYPE in ('movie', 'show'): all_shows += section.all() return all_shows
ca4a8a5f4b2c1632ea6e427c748ef790c896b3ba
709,479
def parse_vars(vars): """ Transform a list of NAME=value environment variables into a dict """ retval = {} for var in vars: key, value = var.split("=", 1) retval[key] = value return retval
e2c6ae05cdf0151caaf8589eb7d7df90dcdd99a1
709,480
from typing import List import collections def find_dup_items(values: List) -> List: """Find duplicate items in a list Arguments: values {List} -- A list of items Returns: List -- A list of duplicated items """ dup = [t for t, c in collections.Counter(values).items() if c > 1] return dup
3a84c2f3b723bed9b7a82dc5f0cfd81d99c2bf48
709,481
def circle_location_Pass(circle_, image_, margin=0.15): """ Check that circle_ does not overlap the margin region of image_ (returns False if it does). """ cy, cx, rad, accum = circle_ image_sizeY_, image_sizeX_ = image_.shape[0], image_.shape[1] margin_min_x = int(image_sizeX_ * margin) margin_max_x = int(image_sizeX_ * (1 - margin)) margin_min_y = int(image_sizeY_ * margin) margin_max_y = int(image_sizeY_ * (1 - margin)) margin_min_xh = int(image_sizeX_ * margin/2.) margin_max_xh = int(image_sizeX_ * (1 - margin/2.)) margin_min_yh = int(image_sizeY_ * margin/2.) margin_max_yh = int(image_sizeY_ * (1 - margin/2.)) if cy<margin_min_y or cy>margin_max_y: return False if cx<margin_min_x or cx>margin_max_x: return False if cy-rad<margin_min_yh or cy+rad>margin_max_yh: return False if cx-rad<margin_min_xh or cx+rad>margin_max_xh: return False return True
4ad94552bc1bf06282a691edede89a65f8b9c328
709,482
def _cache_key_format(lang_code, request_path, qs_hash=None): """ Return the string that will be used as the cache key. The string is built from the function parameters: - lang_code: language code: [pt_BR|es|en] - request_path: the request path - qs_hash: the hash generated from the querystring parameters (if not None) """ cache_key = "/LANG=%s/PATH=%s" % (lang_code, request_path) if qs_hash is not None: cache_key = "%s?QS=%s" % (cache_key, qs_hash) return cache_key
365b1ff144f802e024da5d6d5b25b015463da8b3
709,483
import inspect def form_of(state): """Return the form of the given state.""" if hasattr(state, "__form__"): if callable(state.__form__) and not inspect.isclass(state.__form__): return state.__form__() else: return state.__form__ else: raise ValueError(f"{state} has no form")
e39aa7db7b324ab38b65232b34b987b862812c54
709,484
def WebChecks(input_api, output_api): """Run checks on the web/ directory.""" if input_api.is_committing: error_type = output_api.PresubmitError else: error_type = output_api.PresubmitPromptWarning output = [] output += input_api.RunTests([input_api.Command( name='web presubmit', cmd=[ input_api.python_executable, input_api.os_path.join('web', 'web.py'), 'presubmit', ], kwargs={}, message=error_type, )]) return output
5fb828cc98da71bd231423223336ec81e02505ff
709,485
def get_auth_token(): """ Return the zerotier auth token for accessing its API. """ with open("/var/snap/zerotier-one/common/authtoken.secret", "r") as source: return source.read().strip()
bd74fde05fbb375f8899d4e5d552ad84bcd80573
709,486
import torch def _switch_component( x: torch.Tensor, ones: torch.Tensor, zeros: torch.Tensor ) -> torch.Tensor: """ Basic component of switching functions. Args: x (torch.Tensor): Switch functions. ones (torch.Tensor): Tensor with ones. zeros (torch.Tensor): Zero tensor Returns: torch.Tensor: Output tensor. """ x_ = torch.where(x <= 0, ones, x) return torch.where(x <= 0, zeros, torch.exp(-ones / x_))
8d60c09428440be704e8ced9b8ac19219a0d0b04
709,487
def get_vector(x_array, y_array, pair): """Calculate the vector of a bone from the openpose skeleton""" x = x_array[:,pair[0]]-x_array[:,pair[1]] y = y_array[:,pair[0]]-y_array[:,pair[1]] return [x, y]
e2bfcce3952c6b0a2c8cd9c67c4cd7b52547694d
709,488
import argparse def defineConsole(): """ defines the program console line commands """ parser = argparse.ArgumentParser(description="SBML to BNGL translator") parser.add_argument( "-f1", "--file1", type=str, help="reference file", required=True ) parser.add_argument( "-f2", "--file2", type=str, help="comparison file", required=True ) # parser.add_argument('-o', '--output', type=str, help='output file', required=True) return parser
77f403040cf250810c5b4098c6b9818e5f17117e
709,489
def opts2dict(opts): """Converts options returned from an OptionParser into a dict""" ret = {} for k in dir(opts): if callable(getattr(opts, k)): continue if k.startswith('_'): continue ret[k] = getattr(opts, k) return ret
cfa828f0248ff7565aabbb5c37a7bc6fa38c6450
709,490
def _mag_shrink_hard(x, r, t): """ x is the input, r is the magnitude and t is the threshold """ gain = (r >= t).float() return x * gain
da795bcfc2a6e4bfa3e54d1334c9d8865141a4f1
709,491
import os def expand_path(path): """ Convert a path to an absolute path. This does home directory expansion, meaning a leading ~ or ~user is translated to the current or given user's home directory. Relative paths are relative to the current working directory. :param path: Relative or absolute path of file. :return: Absolute path """ return os.path.abspath(os.path.expanduser(path))
dc73eb377fd5b16091596f4345ee024c3d42e5bc
709,492
import math def smaller2k(n): """ Returns power of 2 which is smaller than n. Handles negative numbers. """ if n == 0: return 0 if n < 0: return -2**math.ceil(math.log2(-n)) else: return 2**math.floor(math.log2(n))
0d0bbbf95cb22bf1b9ffb29012075534bcc9646d
709,493
def unwind(g, num): """Return <num> first elements from iterator <g> as array.""" return [next(g) for _ in range(num)]
59b724ca27729b4fc20d19a40f95d590025307c4
709,494
import re def CPPComments(text): """Remove all C-comments and replace with C++ comments.""" # Keep the copyright header style. line_list = text.splitlines(True) copyright_list = line_list[0:10] code_list = line_list[10:] copy_text = ''.join(copyright_list) code_text = ''.join(code_list) # Remove */ for C-comments, don't care about trailing blanks. comment_end = re.compile(r'\n[ ]*\*/[ ]*') code_text = re.sub(comment_end, '', code_text) comment_end = re.compile(r'\*/') code_text = re.sub(comment_end, '', code_text) # Remove comment lines in the middle of comments, replace with C++ comments. comment_star = re.compile(r'(?<=\n)[ ]*(?!\*\w)\*[ ]*') code_text = re.sub(comment_star, r'// ', code_text) # Remove start of C comment and replace with C++ comment. comment_start = re.compile(r'/\*[ ]*\n') code_text = re.sub(comment_start, '', code_text) comment_start = re.compile(r'/\*[ ]*(.)') code_text = re.sub(comment_start, r'// \1', code_text) # Add copyright info. return copy_text + code_text
0dd490f5497c073534abc30944bd49d0a3cf7e3e
709,495
def to_dict(doc, fields): """Warning: using this convenience fn is probably not as efficient as manually building up a dict. """ def map_field(prop): val = getattr(doc, prop) if isinstance(val, list): return [(e.to_dict() if hasattr(e, 'to_dict') else e) for e in val] else: return val.to_dict() if hasattr(val, 'to_dict') else val return {f: map_field(f) for f in fields}
cb51e3dfdf8c313f218e38d8693af9e7c6bf5045
709,496
def read_varint(stream: bytes): """ Read a varint. Args: stream (bytes): Byte stream. Returns: tuple[int, int]: the decoded value and the number of bytes consumed. """ value = 0 position = 0 shift = 0 while True: if position >= len(stream): break byte = stream[position] value += (byte & 0b01111111) << shift if byte & 0b10000000 == 0: break position += 1 shift += 7 return value, position + 1
58c8187501dc08b37f777256474f95412649bf04
709,497
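A worked example for the decoder above, assuming the standard little-endian base-128 (protobuf-style) varint encoding, where 300 is encoded as 0xAC 0x02:
value, length = read_varint(b"\xac\x02")
assert (value, length) == (300, 2)  # 0xAC -> low 7 bits 44, 0x02 -> next 7 bits 2 (2 << 7 = 256)
value, length = read_varint(b"\x7f")
assert (value, length) == (127, 1)  # single byte, continuation bit clear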
def any(array, mapFunc): """ Checks if any of the elements of array returns true, when applied on a function that returns a boolean. :param array: The array that will be checked, for if any of the elements returns true, when applied on the function. \t :type array: [mixed] \n :param mapFunc: The function that gives a boolean value, when applied on the element of the array. \t :type mapFunc: function \n :returns: Whether any of the elements of the array, returned true or not. \t :rtype: : bool \n """ for elem in array: if mapFunc(elem): return True return False
1e635da691fd1c2fc9d99e15fd7fa0461a7bdf0e
709,498
import pickle def load_agent(agent_args, domain_settings, experiment_settings): """ This function loads the agent from the results directory results/env_name/method_name/filename Args: experiment_settings Return: sarsa_lambda agent """ with open('results/' + experiment_settings['env'] + '/sarsa_lambda/agents/' + experiment_settings['filename'] + '.pkl', 'rb') as input: my_agent = pickle.load(input) return my_agent, None
a5769c952d9fcc583b8fb909e6e772c83b7126ca
709,499
def get_sample_activity_from_batch(activity_batch, idx=0): """Return layer activity for sample ``idx`` of an ``activity_batch``. """ return [(layer_act[0][idx], layer_act[1]) for layer_act in activity_batch]
0302fdf215e63d6cbcd5dafc1bd36ae3d27712f2
709,500
def get_field(self, *args, is_squeeze=False, node=None, is_rthetaz=False): """Get the value of variables stored in Solution. Parameters ---------- self : SolutionData an SolutionData object *args: list of strings List of axes requested by the user, their units and values (optional) Returns ------- field: array an array of field values """ axname, _ = self.get_axes_list() symbol = self.field.symbol if len(args) == 0: field_dict = self.field.get_along(tuple(axname), is_squeeze=is_squeeze) else: field_dict = self.field.get_along(*args, is_squeeze=is_squeeze) field = field_dict[symbol] return field
e93455cbc4b306762336fd13603342e9d92badd1
709,501
def _extract_bike_location(bike, lon_abbrev='lon'): """ Standardize the bike location data from GBFS. Some have extra fields, and some are missing fields. Arguments: bike (dict[str, str]): A GBFS bike object as it appears in free_bike_status.json lon_abbrev (str): The abbreviation used for `longitude` Returns: dict[str, str]: A normalized GBFS bike object """ output = {key: bike.get(key) for key in ['bike_id', 'lat', 'is_reserved', 'is_disabled']} output['lon'] = bike.get(lon_abbrev) return output
a20929a85c993a59b82b552fcfee81b1f818648d
709,502
def clean_word(word): """Return word in lowercase stripped of whitespace""" return word.strip().lower()
ce57fa95ec111ee18c8a00c2076c686bc0abfe5c
709,503
def getSentB(text2: str, offsetB: int, nextPoint: int, sentLength: int): """ Auxiliary function of alignSentences to get the sentences of the original text. """ posB = text2[offsetB+sentLength:].find('.') sentLength += posB+1 sentB = text2[offsetB:offsetB+sentLength] nextPoint = offsetB + sentLength return sentB, nextPoint, sentLength
54914a3c1d85464c0e5a4267538a73693e3df238
709,504
import requests def redirect_page(source_url, destination_url): """Returns False if either page does not respond with 200""" def _check_redirect(full_url): print('Getting ' + full_url) response = requests.get(full_url, allow_redirects=False) if response.status_code == 200: print("Was 200") return True elif response.status_code == 404: print("Was 404") return False elif response.status_code == 301: print("Was 301") return False else: raise Exception("UNEXPECTED STATUS CODE {} FOR {}".format( response.status_code, full_url)) full_source_url = 'https://www.gov.uk' + source_url full_destination_url = 'https://www.gov.uk' + destination_url return _check_redirect(full_source_url) and _check_redirect( full_destination_url)
8caa9db41948f44cc015ca51f179ff318eb22ada
709,505
def WrapWithQuotes(text, quote='"'): """ Wrap the supplied text with quotes Args: text: Input text to wrap quote: Quote character to use for wrapping (default = '"') Returns: Supplied text wrapped in quote char """ if not text.startswith(quote): text = quote + text if not text.endswith(quote): text = text + quote return text
f4f7b83d60e3ea928e3502b9d19ca4c8d52914b9
709,506
import os def get_fastsync_bin(venv_dir, tap_type, target_type): """ Get the absolute path of a fastsync executable """ source = tap_type.replace('tap-', '') target = target_type.replace('target-', '') fastsync_name = f'{source}-to-{target}' return os.path.join(venv_dir, 'pipelinewise', 'bin', fastsync_name)
113835e7620bd378d87cdd162b842e1f7c3a86dc
709,507
from typing import Callable def find_function_in_object(o: object, function_name: str) -> Callable: """Finds a callable object matching given function name in given object. Args: o: Any object. function_name: Name of attribute within o. Returns: Callable object with name <function_name> in object <o>. Raises: LookupError: if <function_name> is not a callable object in <o>. """ try: function_handle = getattr(o, function_name) if not hasattr(function_handle, "__call__"): raise LookupError( f"Resolved object {function_name} in object {o} is not a function." ) else: return function_handle except AttributeError: raise LookupError(f"Cannot find function {function_name} in object {o}.")
c3b6ad12f42d005f643bc8a657f728613bd0e93c
709,508
from datetime import datetime import logging import os def get_standard_logger(name, log_dir=None): """Function to return an instance of type logger.""" if log_dir is None: log_dir = '/Users/teaton/dev/fantasyAM/logs' time_stamp = datetime.now().strftime('%Y%m%d_%H%M%S') logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') logger = logging.getLogger(name) logger.setLevel(logging.INFO) # Create a file handler os.makedirs(log_dir, exist_ok=True) handler = logging.FileHandler(os.path.join(log_dir, f'{name}_{time_stamp}.log')) handler.setLevel(logging.INFO) # Create a logging format formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') handler.setFormatter(formatter) # add the handlers to the logger logger.addHandler(handler) return logger
1e0c4d6ea3ee74d7f5c88f2861369fd3a37f7750
709,510
from typing import Any def increment_occurance_dict(d: dict, k: Any) -> None: """ Increment occurance dict, updates in-place so nothing is returned. """ try: d[k] += 1 except KeyError: d[k] = 1 return None
725b437494f4c647848c54a3d13b4e974fa7f0e8
709,511
def is_bool(space, w_obj): """ Finds out whether a variable is a boolean""" return space.wrap(w_obj.tp == space.tp_bool)
39b62ec08ebbdd4d7505e558ad4901ca67afc12d
709,512
import os import yaml import logging def read_config(): """Parses config and returns config values :returns: config as dict """ dirname = os.path.dirname(__file__) config_path = os.path.join(dirname, 'config.yaml') try: stream = open(config_path, "r") except FileNotFoundError: return None try: config = yaml.safe_load(stream) except yaml.YAMLError as exception: logging.error("YAML error while parsing config.yaml:\n%s", exception) exit() # Remove / on the end of url if "url" in config: config["url"] = config["url"].rstrip("/") return config
d8584727983880591675fcb99dbcc4b9a3a75626
709,513
def air_density(t_f, elevation): """Eq 20, page 25""" return (1.293 - 1.525e-4 * elevation + 6.379e-9 * elevation ** 2) / ( 1 + 0.00367 * t_f )
d5677c755fc52e1ae8cc5293d4ed5c9a4debb71d
709,514
def _strip_after_new_lines(s): """Removes leading and trailing whitespaces in all but first line.""" lines = s.splitlines() if len(lines) > 1: lines = [lines[0]] + [l.lstrip() for l in lines[1:]] return '\n'.join(lines)
247cee0f34ab1e742069e05c8c00095cd24d80bc
709,515
def log_error(message: str) -> str: """error log""" return message
dbd86c39bc504dbac8d308e124c73310df21f372
709,516
def valid_post_author(user, post): """This function checks whether the post was created by the user""" if str(user.key().id()) == str(post.user.key().id()): return True
94ca2f23aa66f79be997080c61fc2f265e868e5f
709,517
import functools def has_vanity_name(func): """Decorator checking whether a command has been provided a vanity_name value""" @functools.wraps(func) async def wrapper(*args, **kwargs): vanity_name = args[1] if vanity_name is None: ctx = args[0] await ctx.send("Please provide a Steam vanity URL or steamid") return return await func(*args, **kwargs) return wrapper
5da3cc410822f0e112a2be1b3cdfc66fb4d79b0c
709,518
from typing import Tuple def arm_name_to_sort_key(arm_name: str) -> Tuple[str, int, int]: """Parses arm name into tuple suitable for reverse sorting by key Example: arm_names = ["0_0", "1_10", "1_2", "10_0", "control"] sorted(arm_names, key=arm_name_to_sort_key, reverse=True) ["control", "0_0", "1_2", "1_10", "10_0"] """ try: trial_index, arm_index = arm_name.split("_") return ("", -int(trial_index), -int(arm_index)) except (ValueError, IndexError): return (arm_name, 0, 0)
c29958bb541a9754e7b4defc6ad953030a364d2f
709,519
from typing import Dict from typing import Any def replace_module_prefix( state_dict: Dict[str, Any], prefix: str, replace_with: str = "", ignore_prefix: str = "" ): """ Remove prefixes in a state_dict needed when loading models that are not VISSL trained models. Specify the prefix in the keys that should be removed. Added by DLM contributors: ignore_prefix is used to ignore certain keys in the state dict """ state_dict = { (key.replace(prefix, replace_with, 1) if key.startswith(prefix) else key): val for (key, val) in state_dict.items() if ((not key.startswith(ignore_prefix)) or ignore_prefix == "") } return state_dict
b8499c818053e7798e9549fbe546bab7d5fbfa84
709,520
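A small sketch of the prefix handling in replace_module_prefix above; the state-dict keys are invented for illustration:
state_dict = {"module.conv.weight": 1, "module.fc.bias": 2, "head.weight": 3}
# Strip the wrapper prefix from matching keys.
assert replace_module_prefix(state_dict, "module.") == {
    "conv.weight": 1, "fc.bias": 2, "head.weight": 3}
# Additionally drop keys under an ignored prefix.
assert replace_module_prefix(state_dict, "module.", ignore_prefix="head") == {
    "conv.weight": 1, "fc.bias": 2}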
def crop(img, left, top, right, bottom): """ Crop rectangle from image. Inputs: img - The image to crop. left - The leftmost index to crop the image. top - The topmost index. right - The rightmost index. bottom - The bottommost index. Outputs: img - The cropped image. """ return img[left:right, top:bottom]
1507a55bba07dc656f51f873d2328b69f70682c9
709,521
import ipaddress def get_hosts(network): """get_hosts() will return all the hosts within a provided network, range""" network = ipaddress.IPv4Network(network, strict=False) hosts_obj = network.hosts() hosts = [] for i in hosts_obj: hosts.append(str(i)) return hosts
097fa3abbf1cda1c3c0ddc0c2fec4a06d1d44fa9
709,522
def select_organization(cursor): """Fetch all rows from the organization table. Args: cursor (mysql.connector.cursor): cursor Returns: list: rows returned by the select """ # run the select cursor.execute('SELECT * FROM organization ORDER BY organization_id') rows = cursor.fetchall() return rows
6e5c1a2f90d41223ba09fe3278353370515c0430
709,523
def flax_tag(arr): """Wraps a value in a flax module, to inspect intermediate values.""" return arr
be2fbef6117c859b7fc9dd7274815df4e70df17e
709,524
import requests import json def sendNotification(token, title, message, extraData=None, channelID=None): """ send Notification to Devices :param token: :param title: :param message: :return: """ url = 'https://exp.host/--/api/v2/push/send' headers = { "Content-Type": "application/json" } data = { "to": token, "title": title, "body": message } # Verify we have Additional data to append if extraData is not None: data["data"] = extraData # Android Only! Verify if we have a channel ID and append it if channelID is not None: data["channelId"] = channelID res = requests.post(url, data=json.dumps(data), headers=headers) return res.status_code
1038dfd3872221a0d447b7708d58d95e931c59e5
709,525
import random def select(weights): """ select a node with probability proportional to its "weight" """ r = random.random() * sum(weights) s = 0.0 for k,w in enumerate(weights): s += w if r <= s: return k raise RuntimeError("select WTF from %s" % weights)
fed92de65cfae6f3532754215f5b88a564365ac7
709,526
def kexo(spacecraft_id, sensor_id, band_id): """Sun exo-atmospheric irridiance [W/m2/sr] This is used for processing surface reflectance. Spacecraft_id: Landsat7 Sensor_id: ETM+ band_id: band1, band2, band3, band4, band5, band7, band8 Spacecraft_id: Terra Sensor_id: Aster band_id: band1, band2, band3, band4, band5, band7, band8, band9 kexo(spacecraft_id, sensor_id, band_id) """ if(spacecraft_id == "Landsat7"): if (sensor_id == "ETM+"): if(band_id == "band1"): kexo = 1969.0 if(band_id == "band2"): kexo = 1840.0 if(band_id == "band3"): kexo = 1551.0 if(band_id == "band4"): kexo = 1044.0 if(band_id == "band5"): kexo = 225.7 if(band_id == "band7"): kexo = 82.07 if(band_id == "band8"): kexo = 1385.64 # Self calculated value... else: kexo = 0.0 if(spacecraft_id == "Terra"): if (sensor_id == "Aster"): if(band_id == "band1"): kexo = 1828.0 if(band_id == "band2"): kexo = 1559.0 if(band_id == "band3"): kexo = 1045.0 if(band_id == "band4"): kexo = 226.73 if(band_id == "band5"): kexo = 86.50 if(band_id == "band7"): kexo = 74.72 if(band_id == "band8"): kexo = 66.41 if(band_id == "band9"): kexo = 59.83 else: kexo = 0.0 else: kexo = 0.0 else: kexo = 0.0 return kexo
0e11a1b0b6ea8a43bef954273ed3a32a1d39c842
709,527
def add_manuscript_urls_to_ci_params(ci_params): """ Return and edit in-place the ci_params dictionary to include 'manuscript_url'. This function assumes Travis CI is used to deploy to GitHub Pages, while AppVeyor is used for storing manuscript artifacts for pull request builds. """ if not ci_params: return ci_params assert isinstance(ci_params, dict) provider = ci_params.get('provider') if provider == 'travis': ci_params['manuscript_url'] = ( "https://{repo_owner}.github.io/{repo_name}/v/{commit}/" .format(**ci_params) ) if provider == 'appveyor': ci_params['manuscript_url'] = f"{ci_params['build_url']}/artifacts" return ci_params
7d45c4fe8060d387d0238788e4b7566e09abc499
709,529
import torch from typing import Callable def model_contrast_score(overlays: torch.Tensor, masks: torch.Tensor, object_labels: torch.Tensor, scene_labels: torch.Tensor, object_model: Callable, scene_model: Callable, object_method: Callable, scene_method: Callable, device: str): """ Model contrast score: Difference of importance of object pixels for model trained on object labels (should be important) and model trained on scene labels (should not be important) """ overlays = overlays.to(device) object_labels = object_labels.to(device) scene_labels = scene_labels.to(device) masks = masks.squeeze().to(device) # We check if both the object model and the scene model make the correct classification with torch.no_grad(): y_pred_obj = torch.argmax(object_model(overlays), dim=1) y_pred_scene = torch.argmax(scene_model(overlays), dim=1) correctly_classified = ((y_pred_obj == object_labels) & (y_pred_scene == scene_labels)) object_model_attrs = object_method(overlays, object_labels) scene_model_attrs = scene_method(overlays, scene_labels) mask_sizes = torch.sum(masks.flatten(1), dim=1) diffs = (object_model_attrs - scene_model_attrs) / mask_sizes return diffs.cpu(), correctly_classified.cpu()
b44b0a958a79a1ad7a84de15817cdbc32160c13b
709,530
import time def execution_duration(fun): """ Calculates the duration the function 'fun' takes to execute. execution_duration returns a wrapper function to which you pass your arguments. Example: execution_duration(my_function)(my_first_param, my_second_param) The result of the wrapper function will be a tuple, where the first value is the return value of your function and the second is the execution time in seconds expressed as a float. """ def wrapper(*args, **kwargs): t1 = time.time() result = fun(*args, **kwargs) exec_dur = time.time() - t1 return result, exec_dur return wrapper
b824ce8e1448a65bd932ec8344b1976d2a86dd09
709,531
from typing import Union def chunks_lists_to_tuples(level: Union[list, int, float]) -> Union[tuple, int, float]: """Convert a recursive list of lists of ints into a tuple of tuples of ints. This is a helper function needed because MongoDB automatically converts tuples to lists, but the dask constructor wants the chunks defined strictly as tuples. e.g. - input: ``[[1, 2], [3, 4]]`` - output: ``((1, 2), (3, 4))`` .. note:: float data type is supported to allow for NaN-sized dask chunks """ if isinstance(level, list): return tuple(chunks_lists_to_tuples(i) for i in level) if isinstance(level, (int, float)): return level raise TypeError(level)
49cc7923211d50fdf6a386016af12b80a2f821df
709,532
def apply_inverse_rot_to_vec(rot, vec): """Multiply the inverse of a rotation matrix by a vector.""" # Inverse rotation is just transpose return [rot[0][0] * vec[0] + rot[1][0] * vec[1] + rot[2][0] * vec[2], rot[0][1] * vec[0] + rot[1][1] * vec[1] + rot[2][1] * vec[2], rot[0][2] * vec[0] + rot[1][2] * vec[1] + rot[2][2] * vec[2]]
1108ac6caa30b3562a2af1bcc83e1c1a1bfd8d4d
709,533
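A quick check that the transpose-based inverse above undoes a rotation; the 90-degree z-rotation matrix is chosen for illustration:
rot = [[0, -1, 0], [1, 0, 0], [0, 0, 1]]  # rotate 90 degrees about z
vec = [0, 1, 0]                           # this is rot applied to [1, 0, 0]
assert apply_inverse_rot_to_vec(rot, vec) == [1, 0, 0]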
def binstringToBitList(binstring): """Converts a string of '0's and '1's to a list of 0's and 1's""" bitList = [] for bit in binstring: bitList.append(int(bit)) return bitList
d8ff10651d9fc2d02aba3b4a57a0a768032783b7
709,534
def int_to_bitstr(int_value: int) -> str: """ A function which returns its bit representation as a string. Arguments: int_value (int) - The int value we want to get the bit representation for. Return: str - The string representation of the bits required to form the int. """ return bin(int_value)[2:]
cafbf151ce0404081a0a8e1327d85e61ea7ddc52
709,535
def target_reached(effect): """target amount has been reached (100% or more)""" if not effect.instance.target: return False return effect.instance.amount_raised >= effect.instance.target
0101cd9c3c51a1e03ba7cfd8844c3821a156e2fe
709,536
def get_polynomial_coefficients(degree=5): """ Return a list with coefficient names, [1 x y x^2 xy y^2 x^3 ...] """ names = ["1"] for exp in range(1, degree + 1): # 1, ..., degree for x_exp in range(exp, -1, -1): y_exp = exp - x_exp if x_exp == 0: x_str = "" elif x_exp == 1: x_str = r"$x$" else: x_str = rf"$x^{x_exp}$" if y_exp == 0: y_str = "" elif y_exp == 1: y_str = r"$y$" else: y_str = rf"$y^{y_exp}$" names.append(x_str + y_str) return names
9369841215045e925a3453b83be9dc49c9be7b92
709,537
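For reference, the degree-2 output of the helper above (note that mixed terms concatenate the two math snippets back to back):
assert get_polynomial_coefficients(degree=2) == [
    "1", "$x$", "$y$", "$x^2$", "$x$$y$", "$y^2$"]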
def user_enabled(inst, opt): """ Check whether the option is enabled. :param inst: instance from content object init :param opt: Option to be checked :return: True if enabled, False if disabled or not present """ return opt in inst.settings and inst.settings[opt]
3b2a5a1534ff779178eb4bd6b839b66c0b07864f
709,538
def prime_factors(n): """ Return the set of prime factors of n :param n: int :return: set """ all_factors = set() t = n # strip out all factors of 2 while t % 2 == 0: t //= 2 all_factors.add(2) # check odd divisors up to sqrt of the remaining value d = 3 while d * d <= t: while t % d == 0: t //= d all_factors.add(d) d += 2 # whatever remains above 1 is itself prime if t > 1: all_factors.add(t) return all_factors
09aad44a7b04492c225447eaa15590fa630a43cd
709,539
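A couple of checks, assuming the corrected prime_factors above:
assert prime_factors(360) == {2, 3, 5}   # 360 = 2^3 * 3^2 * 5
assert prime_factors(13) == {13}         # a prime maps to itself
assert prime_factors(97 * 101) == {97, 101}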
def erase_not_displayed(client): """Erase all non-displayed models from memory. Args: client (obj): creopyson Client. Returns: None """ return client._creoson_post("file", "erase_not_displayed")
c3981fcce00b5d5440fcbdbe8781e9e6229a8fa7
709,540
def create_coordinate_string_dict(): """Coordinates for the 31 string (himo) patterns.""" w = 120 h = 120 return { 47: (0, 0), 57: (1*-w, 0), 58: (2*-w, 0), 16: (4*-w, 0), 35: (5*-w, 0), 36: (6*-w, 0), 38: (0, 1*-h), 13: (1*-w, 1*-h), 14: (2*-w, 1*-h), 15: (3*-w, 1*-h), 25: (4*-w, 1*-h), 17: (5*-w, 1*-h), 27: (6*-w, 1*-h), 37: (7*-w, 1*-h), 1357: (0, 2*-h), 1571: (1*-w, 2*-h), 7135: (2*-w, 2*-h), 3583: (4*-w, 2*-h), 274: (5*-w, 2*-h), 1361: (6*-w, 2*-h), 1371: (0, 3*-h), 15037: (1*-w, 3*-h), 3573: (2*-w, 3*-h), 416: (4*-w, 3*-h), 258: (6*-w, 3*-h), 1753: (0, 4*-h), 1351: (1*-w, 4*-h), 3175: (2*-w, 4*-h), 2572: (4*-w, 4*-h), 638: (5*-w, 4*-h), 1471: (6*-w, 4*-h), }
4abc2b246345569780db2dc9f6ef71c56ae86528
709,541
def all_but_ast(check): """Only passes AST to check.""" def _check_wrapper(contents, ast, **kwargs): """Wrap check and passes the AST to it.""" del contents del kwargs return check(ast) return _check_wrapper
71f3e3b8649a3a9885ded7eec248894cca8083c4
709,542
def normalize(*args): """Scale a sequence of occurrences into probabilities that sum up to 1.""" total = sum(args) return [arg / total for arg in args]
49b0f998fe58b2c85da5a993e542d91bb5dd5382
709,543
import requests def _make_request( resource: str, from_currency_code: str, to_currency_code: str, timestamp: int, access_token: str, exchange_code: str, num_records: int, api_version: str ) -> requests.Response: """ API documentation for cryptocompare can be found at https://min-api.cryptocompare.com/documentation """ base_url = f"https://min-api.cryptocompare.com/data/{api_version}/{resource}" params = { "fsym": from_currency_code, "tsym": to_currency_code, "e": exchange_code, "limit": num_records, "toTs": timestamp, "api_key": access_token } return requests.get(base_url, params=params)
4da7c3cab42b742b106fafb4c1585e6ecb250121
709,544
import os def root_dir(): """ Returns root director for this project """ return os.path.dirname(os.path.realpath(__file__ + '/..'))
c9346df7838dd0a528613a7069c55d910373fe86
709,546
from warnings import warn def get_case_color_marker(case): """Get color and marker based on case.""" black_o = ("#000000", "o") teal_D = ("#469990", "D") orange_s = ("#de9f16", "s") purple_v = ("#802f99", "v") bs = case["batch_size"] sub = case["subsampling"] mc = case["mc_samples"] if sub is None and mc == 0: # only bs mapping = {2: purple_v, 8: orange_s, 32: teal_D, 128: black_o} try: return mapping[bs] except KeyError: warn(f"Could not map bs={bs} to color-marker-pair. Returning (black, o)") return black_o if sub is not None and mc == 0: # only bs & sub return teal_D if sub is None and mc != 0: # only bs & mc return orange_s if sub is not None and mc != 0: # bs, sub & mc return purple_v
cfb5e649023d84beeef4ea9157baf680496039ce
709,547
from typing import Any from typing import Dict def _adjust_estimator_options(estimator: Any, est_options: Dict[str, Any], **kwargs) -> Dict[str, Any]: """ Adds specific required classifier options to the `clf_options` dictionary. Parameters ---------- classifier : Any The classifier object for which the options have to be added clf_options : Dict[str, Any] Dictionary, where the additional classifier options should be added to kwargs : Additional classifier options as keyword arguments Returns ------- Dict[str, Any] The input `clf_options` dictionary containing the additional classifier options """ if estimator.__name__ == 'XGBClassifier': est_options['num_class'] = kwargs['n_categories'] elif estimator.__name__ == 'DNNClassifier': est_options['n_classes'] = kwargs['n_categories'] est_options['n_features'] = kwargs['n_features'] est_options['random_state'] = kwargs['random_seed'] return est_options
4ff98d8a3b3e647e129fb0ffbc9bc549caa60440
709,551
def distance_loop(x1, x2): """ Returns the Euclidean distance between the 1-d numpy arrays x1 and x2""" total = 0.0 for a, b in zip(x1, x2): total += (a - b) ** 2 return total ** 0.5
abd35a27cbeb5f5c9fe49a2a076d18f16e2849d9
709,552
import torch def fft_to_complex_matrix(x): """ Create matrix with [a -b; b a] entries for complex numbers. """ x_stacked = torch.stack((x, torch.flip(x, (4,))), dim=5).permute(2, 3, 0, 4, 1, 5) x_stacked[:, :, :, 0, :, 1] *= -1 return x_stacked.reshape(-1, 2 * x.shape[0], 2 * x.shape[1])
9fb38004041280da0d6d53830761501aebf7969a
709,553
import argparse import os def get_args(): """ Get User defined arguments, or assign defaults :rtype: argparse.ArgumentParser() :return: User defined or default arguments """ parser = argparse.ArgumentParser() # Positional arguments parser.add_argument("main_args", type=str, nargs="*", help="task for Seisflows to perform") # Optional parameters parser.add_argument("-w", "--workdir", nargs="?", default=os.getcwd()) parser.add_argument("-p", "--parameter_file", nargs="?", default="parameters.yaml") return parser.parse_args()
2f31a2142034127d3de7f4212841c3432b451fc4
709,554
def get_fields(fields): """ From the last column of a GTF, return a dictionary mapping each value. Parameters: fields (str): The last column of a GTF Returns: attributes (dict): Dictionary created from fields. """ attributes = {} description = fields.strip() description = [x.strip() for x in description.split(";")] for pair in description: if pair == "": continue pair = pair.replace('"', '') key, val = pair.split() attributes[key] = val # put in placeholders for important attributes (such as gene_id) if they # are absent if 'gene_id' not in attributes: attributes['gene_id'] = 'NULL' return attributes
30777838934b18a0046017f3da6b3a111a911a9c
709,555
def add_log_group_name_params(log_group_name, configs): """Add a "log_group_name": log_group_name to every config.""" for config in configs: config.update({"log_group_name": log_group_name}) return configs
a5fce8143c3404257789c1720bbfefc49c8ea3f5
709,556
import time def time_remaining(event_time): """ Args: event_time (time.struct_time): Time of the event. Returns: float: Time remaining between now and the event, in seconds since epoch. """ now = time.localtime() time_remaining = time.mktime(event_time) - time.mktime(now) return time_remaining
cb3dfcf916cffc3b45f215f7642aeac8a1d6fef7
709,558
def has_no_jump(bigram, peaks_groundtruth): """ Tell whether the two components of the bigram are the same or successive in the sequence of valid peaks. For example, if groundtruth = [1,2,3], [1,1] or [2,3] have no jump but [1,3] has a jump. bigram : the bigram to judge peaks_groundtruth : the list of valid peaks Return boolean """ assert len(bigram) == 2 if len(set(bigram)) == 1: return True sorted_groundtruth = sorted(peaks_groundtruth) sorted_peaks = sorted(list(bigram)) begin = sorted_groundtruth.index(sorted_peaks[0]) end = begin+len(sorted_peaks) return sorted_peaks == sorted_groundtruth[begin:end]
e334c389436d5cda2642f8ac7629b64074dcd0e0
709,559
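A short usage sketch of the docstring example above:
groundtruth = [1, 2, 3]
assert has_no_jump((1, 1), groundtruth)      # same peak twice
assert has_no_jump((2, 3), groundtruth)      # successive peaks
assert not has_no_jump((1, 3), groundtruth)  # skips peak 2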
def hasf(e): """ Returns a function which if applied with `x` tests whether `x` has `e`. Examples -------- >>> filter(hasf("."), ['statement', 'A sentence.']) ['A sentence.'] """ return lambda x: e in x
ac9ce7cf2ed2ee8a050acf24a8d0a3b95b7f2d50
709,560
def obtenTipoNom(linea): """ For now, returns the first word of the title; it should eventually return what the entry is about """ res = linea.split('\t') return res[6].partition(' ')[0]
73edc42c5203b7ebd0086876096cdd3b7c65a54c
709,561
def part_5b_avg_std_dev_of_replicates_analysis_completed(*jobs): """Check that the initial job data is written to the json files.""" file_written_bool_list = [] all_file_written_bool_pass = False for job in jobs: data_written_bool = False if job.isfile( f"../../src/engines/gomc/averagesWithinReplicatez.txt" ) and job.isfile(f"../../src/engines/gomc/setAverages.txt"): data_written_bool = True file_written_bool_list.append(data_written_bool) if False not in file_written_bool_list: all_file_written_bool_pass = True return all_file_written_bool_pass
f238382e18de32b86598d5daa13f92af01311d3d
709,562
def indent_multiline(s: str, indentation: str = " ", add_newlines: bool = True) -> str: """Indent the given string if it contains more than one line. Args: s: String to indent indentation: Indentation to prepend to each line. add_newlines: Whether to add newlines surrounding the result if indentation was added. """ lines = s.splitlines() if len(lines) <= 1: return s lines_str = "\n".join(f"{indentation}{line}" for line in lines) if add_newlines: return f"\n{lines_str}\n" else: return lines_str
62eb2fc7c3f3b493a6edc009692f472e50e960f7
709,563
from typing import Optional def _get_property(self, key: str, *, offset: int = 0) -> Optional[int]: """Get a property from the location details. :param key: The key for the property :param offset: Any offset to apply to the value (if found) :returns: The property as an int value if found, None otherwise """ value = self.location_details.get(key) if value is None: return None return int(value[0]) + offset
8d2c35a88810db5255cfb0ca9d7bfa6345ff3276
709,564
def _reformTrend(percs, inits): """ Helper function to recreate original trend based on percent change data. """ trend = [] trend.append(percs[0]) for i in range(1, len(percs)): newLine = [] newLine.append(percs[i][0]) #append the date for j in range(1, len(percs[i])): #for each term on date level = float(trend[i-1][j]) * percs[i][j].numerator / percs[i][j].denominator #level is the prev level * %change newLine.append(level) trend.append(newLine) return trend
1f6c8bbb4786b53ea2c06643108ff50691b6f89c
709,565
def phases(times, names=[]): """ Creates named phases from a set of times defining the edges of the intervals """ if not names: names = range(len(times)-1) return {names[i]:[times[i], times[i+1]] for (i, _) in enumerate(times) if i < len(times)-1}
0e56dcf57a736e4555cae02b8f79b827c17e1d38
709,566
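An illustrative call for phases above; the times and names are made up:
assert phases([0, 10, 20], names=["warmup", "run"]) == {
    "warmup": [0, 10], "run": [10, 20]}
assert phases([0.0, 1.5]) == {0: [0.0, 1.5]}  # default integer names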
def is_iterable(value): """Return True if the object is an iterable type.""" return hasattr(value, '__iter__')
55e1ecc9b264d39aaf5cfcbe89fdc01264191d95
709,567
def remove_scope_from_name(name, scope): """ Args: name (str): full name of the tf variable with all the scopes Returns: (str): full name of the variable with the scope removed """ result = name.split(scope)[1] result = result[1:] if result[0] == '/' else result return result.split(":")[0]
aa70042a2f57185a0f5e401d182a02e5654eb2b0
709,568
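A usage sketch for remove_scope_from_name above, with hypothetical TensorFlow-style variable names:
assert remove_scope_from_name("model/dense/kernel:0", "model") == "dense/kernel"
assert remove_scope_from_name("model/bias:0", "model") == "bias"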