content: string (lengths 35 to 416k)
sha1: string (length 40)
id: int64 (0 to 710k)
import hashlib

import six


def make_hashkey(seed):
    """Generate a string key by hashing"""
    h = hashlib.md5()
    h.update(six.b(str(seed)))
    return h.hexdigest()
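A minimal usage sketch (not part of the original entry; assumes `six` is installed). The seed can be any object with a stable `str()`:

key = make_hashkey(42)
assert len(key) == 32  # an md5 hex digest is always 32 characters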
38d088005cb93fc0865933bbb706be171e72503a
705,905
def fix(x):
    """
    Replaces spaces with tabs, removes spurious newlines, and lstrip()s each
    line. Makes it really easy to create BED files on the fly for testing and
    checking.
    """
    s = ""
    for i in x.splitlines():
        i = i.lstrip()
        if i.endswith('\t'):
            add_tab = '\t'
        else:
            add_tab = ''
        if len(i) == 0:
            continue
        i = i.split()
        i = '\t'.join(i) + add_tab + '\n'
        s += i
    return s
ecd3a4d7f470feae1b697025c8fbf264d5c6b149
705,906
def get_ngram_universe(sequence, n):
    """
    Computes the universe of possible ngrams given a sequence. Where n is
    equal to the length of the sequence, the resulting number represents the
    sequence universe.

    Example
    -------
    >>> sequence = [2,1,1,4,2,2,3,4,2,1,1]
    >>> ps.get_ngram_universe(sequence, 3)
    64
    """
    # if recurrence is possible, the universe is given by k^t (SSA pg 68)
    k = len(set(sequence))
    if k > 10 and n > 10:
        return 'really big'
    return k**n
3dbfe1822fdefb3e683b3f2b36926b4bb066468f
705,907
def replace_header(input_df):
    """replace headers of the dataframe with first row of sheet"""
    new_header = input_df.iloc[0]
    input_df = input_df[1:]
    input_df.columns = new_header
    return input_df
c8946fc269dd313b80df421af8d0b3fc6c47aed7
705,908
def cartToRadiusSq(cartX, cartY):
    """Convert Cartesian coordinates into their corresponding radius squared."""
    return cartX**2 + cartY**2
3fb79d2c056f06c2fbf3efc14e08a36421782dbd
705,909
def unique_entity_id(entity):
    """
    :param entity: django model
    :return: unique token combining the model type and id for use in HTML
    """
    return "%s-%s" % (type(entity).__name__, entity.id)
c58daf9a115c9840707ff5e807efadad36a86ce8
705,910
def get_job_metadata(ibs, jobid):
    """
    Web call that returns the metadata of a job

    CommandLine:
        # Run Everything together
        python -m wbia.web.job_engine --exec-get_job_metadata

        # Start job queue in its own process
        python -m wbia.web.job_engine job_engine_tester --bg
        # Start web server in its own process
        ./main.py --web --fg
        pass
        # Run foreground process
        python -m wbia.web.job_engine --exec-get_job_metadata:0 --fg

    Example:
        >>> # xdoctest: +REQUIRES(--web-tests)
        >>> # xdoctest: +REQUIRES(--slow)
        >>> # xdoctest: +REQUIRES(--job-engine-tests)
        >>> from wbia.web.job_engine import *  # NOQA
        >>> import wbia
        >>> with wbia.opendb_bg_web('testdb1', managed=True) as web_ibs:  # , domain='http://52.33.105.88')
        ...     # Test get metadata of a job id that does not exist
        ...     response = web_ibs.send_wbia_request('/api/engine/job/metadata/', jobid='badjob')
    """
    status = ibs.job_manager.jobiface.get_job_metadata(jobid)
    return status
24ba96d6a71f105057a9fc9012de9edb187787d5
705,912
def variable_to_json(var):
    """Converts a Variable object to dict/json struct"""
    o = {}
    o['x'] = var.x
    o['y'] = var.y
    o['name'] = var.name
    return o
86497a7915e4825e6e2cbcfb110c9bc4c229efed
705,913
import json


def get_result_handler(rc_value, sa_file=None):
    """Returns dict of result handler config.

    Backwards compatible for JSON input.

    rc_value (str): Result config argument specified.
    sa_file (str): SA path argument specified.
    """
    try:
        result_handler = json.loads(rc_value)
    except json.decoder.JSONDecodeError:
        config = rc_value.split(".", 1)
        if len(config) == 2:
            result_handler = {
                "type": "BigQuery",
                "project_id": config[0],
                "table_id": config[1],
            }
        else:
            raise ValueError(f"Unable to parse result handler config: `{rc_value}`")
    if sa_file:
        result_handler["google_service_account_key_path"] = sa_file
    return result_handler
83c6aa6e0cacdc64422553050072af5d8ea46bf6
705,914
def getOrElseUpdate(dictionary, key, opr):
    """If given key is already in the dictionary, returns associated value.
    Otherwise compute the value with opr, update the dictionary and return it.
    A None dictionary is ignored.

    >>> d = dict()
    >>> getOrElseUpdate(d, 1, lambda _: _ + 1)
    2
    >>> print(d)
    {1: 2}

    @type dictionary: dictionary of A => B
    @param dictionary: the dictionary
    @type key: A
    @param key: the key
    @type opr: function of A => B
    @param opr: the function to compute new value from keys
    @rtype: B
    @return: the value associated with the key
    """
    if dictionary is None:
        return opr(key)
    else:
        if key not in dictionary:
            dictionary[key] = opr(key)
        return dictionary[key]
95454d7ca34d6ae243fda4e70338cf3d7584b827
705,915
from collections import OrderedDict


def set_standard_attrs(da):
    """Add standard attributes to an xarray DataArray"""
    da.coords["lat"].attrs = OrderedDict(
        [
            ("standard_name", "latitude"),
            ("units", "degrees_north"),
            ("axis", "Y"),
            ("long_name", "latitude"),
            ("out_name", "lat"),
            ("stored_direction", "increasing"),
            ("type", "double"),
            ("valid_max", "90.0"),
            ("valid_min", "-90.0"),
        ]
    )
    da.coords["lon"].attrs = OrderedDict(
        [
            ("standard_name", "longitude"),
            ("units", "degrees_east"),
            ("axis", "X"),
            ("long_name", "longitude"),
            ("out_name", "lon"),
            ("stored_direction", "increasing"),
            ("type", "double"),
            ("valid_max", "180.0"),
            ("valid_min", "-180.0"),
        ]
    )
    da.coords["depth_coord"].attrs = OrderedDict(
        [
            ("standard_name", "depth"),
            ("units", "m"),
            ("axis", "Z"),
            ("long_name", "ocean depth coordinate"),
            ("out_name", "lev"),
            ("positive", "down"),
            ("stored_direction", "increasing"),
            ("valid_max", "12000.0"),
            ("valid_min", "0.0"),
        ]
    )
    da.coords["time"].attrs = OrderedDict(
        [
            ("standard_name", "time"),
            ("axis", "T"),
            ("long_name", "time"),
            ("out_name", "time"),
            ("stored_direction", "increasing"),
        ]
    )
    da.coords["time"].encoding["units"] = "days since '1900-01-01'"
    return da
21f83552466127928c9a30e9354e91c3031225aa
705,916
import os


def find_git_repos(folder):
    """
    Returns a list of all git repos within the given ancestor folder.
    """
    return [root for root, subfolders, files in os.walk(folder) if '.git' in subfolders]
615fcc3e947ac3f198638acb23b8a8118c3ec9cd
705,917
def const_bool(value):
    """Create an expression representing the given boolean value.

    If value is not a boolean, it is converted to a boolean. So, for
    instance, const_bool(1) is equivalent to const_bool(True).
    """
    return ['constant', 'bool', ['{0}'.format(1 if value else 0)]]
d11d01f94b8ad20d393a39a28dbfd18cc8fa217e
705,918
import struct


def long_to_bytes(n, blocksize=0):
    """Convert an integer to a byte string.

    In Python 3.2+, use the native method instead::

        >>> n.to_bytes(blocksize, 'big')

    For instance::

        >>> n = 80
        >>> n.to_bytes(2, 'big')
        b'\x00P'

    If the optional :data:`blocksize` is provided and greater than zero,
    the byte string is padded with binary zeros (on the front) so that the
    total length of the output is a multiple of blocksize.

    If :data:`blocksize` is zero or not provided, the byte string will
    be of minimal length.
    """
    # after much testing, this algorithm was deemed to be the fastest
    s = b''
    n = int(n)
    pack = struct.pack
    while n > 0:
        s = pack('>I', n & 0xffffffff) + s
        n = n >> 32
    # strip off leading zeros
    for i in range(len(s)):
        if s[i] != b'\000'[0]:
            break
    else:
        # only happens when n == 0
        s = b'\000'
        i = 0
    s = s[i:]
    # add back some pad bytes. this could be done more efficiently w.r.t. the
    # de-padding being done above, but sigh...
    if blocksize > 0 and len(s) % blocksize:
        s = (blocksize - len(s) % blocksize) * b'\000' + s
    return s
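A few sanity checks (not from the original entry), following the behavior the docstring describes:

assert long_to_bytes(80) == b'P'
assert long_to_bytes(80, blocksize=2) == b'\x00P'  # front-padded to a multiple of 2
assert long_to_bytes(0) == b'\x00'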
1157a466ce9754c12e01f7512e879cc28a2a4b23
705,919
def filt_all(list_, func):
    """Like filter but reverse arguments and returns list"""
    return [i for i in list_ if func(i)]
72010b483cab3ae95d49b55ca6a70b0838b0a34d
705,920
def _rav_setval_(self, value):
    """Assign the value for the variable

    >>> var = ...
    >>> var.value = 10
    """
    value = float(value)
    self.setVal(value)
    return self.getVal()
80ad7ddec68d5c97f72ed63dd6ba4a1101de99cb
705,921
def bb_to_plt_plot(x, y, w, h):
    """
    Converts a bounding box to parameters for a plt.plot([..], [..])
    for actual plotting with pyplot
    """
    X = [x, x, x+w, x+w, x]
    Y = [y, y+h, y+h, y, y]
    return X, Y
10ea3d381969b7d30defdfdbbac0a8d58d06d4d4
705,924
from collections import Counter


def count_items(column_list: list):
    """
    Count the types (values) and the number of items of a given list.

    args:
        column_list (list): List of data with different types of values

    return:
        Returns two values: a list of types (list) and the total count of
        items of each type (list)
    """
    counter = Counter(column_list)
    item_types = list(counter.keys())
    count_items = list(counter.values())
    return item_types, count_items
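A quick illustration (not from the original entry; assumes Python 3.7+, where Counter preserves insertion order):

types_, counts = count_items(['a', 'b', 'a', 'c', 'a'])
assert types_ == ['a', 'b', 'c']
assert counts == [3, 1, 1]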
06cf25aed4d0de17fa8fb11303c9284355669cf5
705,925
def to_graph(grid):
    """
    Build adjacency list representation of graph

    Land cells in grid are connected if they are vertically or horizontally
    adjacent
    """
    adj_list = {}
    n_rows = len(grid)
    n_cols = len(grid[0])
    land_val = "1"
    for i in range(n_rows):
        for j in range(n_cols):
            if grid[i][j] == land_val:
                adj_list[(i, j)] = []
                if i > 0 and grid[i-1][j] == land_val:
                    adj_list[(i, j)].append((i-1, j))
                if i < n_rows-1 and grid[i+1][j] == land_val:
                    adj_list[(i, j)].append((i+1, j))
                if j > 0 and grid[i][j-1] == land_val:
                    adj_list[(i, j)].append((i, j-1))
                if j < n_cols-1 and grid[i][j+1] == land_val:
                    adj_list[(i, j)].append((i, j+1))
    return adj_list
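A small worked example (added here for illustration) on a 2x3 grid; cell values are strings because land_val is "1":

grid = [["1", "1", "0"],
        ["0", "1", "0"]]
adj = to_graph(grid)
assert adj[(0, 0)] == [(0, 1)]
assert sorted(adj[(0, 1)]) == [(0, 0), (1, 1)]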
ebdd0406b123a636a9d380391ef4c13220e2dabd
705,926
import os


def get_server_url():
    """Return current server url, does not work in a task"""
    host = os.environ.get('HTTP_X_FORWARDED_HOST') or os.environ['HTTP_HOST']
    return u'%s://%s' % (os.environ['wsgi.url_scheme'], host)
c71f2244b8dd023b11a6db0aee885d4d332f3a7c
705,927
def qs_without_parameter(arg1, arg2):
    """
    Removes an argument from the get URL.

    Use: {{ request|url_without_parameter:'page' }}

    Args:
        arg1: request
        arg2: parameter to remove
    """
    parameters = {}
    for key, value in arg1.items():
        if parameters.get(key, None) is None and arg2 != key:
            try:
                parameters[key] = value[0]
            except IndexError:
                parameters[key] = value
    return "&".join([k + "=" + v for k, v in parameters.items()])
649931de5490621c92513877b21cb8cfce8d66ff
705,928
import re


def _remove_comments_inline(text):
    """Removes the comments from the string 'text'."""
    if 'auto-ignore' in text:
        return text
    if text.lstrip(' ').lstrip('\t').startswith('%'):
        return ''
    match = re.search(r'(?<!\\)%', text)
    if match:
        return text[:match.end()] + '\n'
    else:
        return text
463e29e1237a88e91c13a58ffea1b2ccdafd4a1d
705,929
def is_pj_player_plus(value):
    """
    :param value: The value to be checked
    :type value: Any
    :return: whether or not the value is a PJ Player+
    :rtype: bool
    """
    # parenthesized so the isinstance check guards both length tests;
    # the unparenthesized original crashed on non-sized values
    return isinstance(value, list) and (len(value) == 4 or len(value) == 3)
1c4e7a7513d746d25f6b3d7964455b0735c988fc
705,930
def get_facts(F5, uri):
    """
    Issue a GET of the URI specified to the F5 appliance and return the
    result as facts. The URI must have a slash as the first character;
    add it if missing.

    In Ansible 2.2 found name clashing:
    http://stackoverflow.com/questions/40281706/cant-read-custom-facts-with-list-array-of-items
    """
    result = {'ansible_facts': {}}
    if uri[0] != "/":
        uri = "/" + uri
    status, result["ansible_facts"] = F5.genericGET(uri)
    try:
        # replace key name of 'items' with 'bigip_items'
        result["ansible_facts"]["bigip_items"] = result["ansible_facts"].pop("items")
    except KeyError:
        result["ansible_facts"]["bigip_items"] = dict()
    return status, result
554cc7b9bf35d631c8742614142f5aa2ecaba9b4
705,931
def gather_emails_GUIDs(mailbox, search, folder):
    """Download GUID of messages passing search requirements"""
    mailbox.folder.set(folder)
    return (email for email in mailbox.uids(search))
d75ecdeaa4f95f9108276f2be236e33934d7de01
705,932
def ndo_real(data, n):
    """mimic of gmx_fio_ndo_real in gromacs"""
    return [data.unpack_real() for i in range(n)]
875edd4c78e591fcee1b3de30f0ed62a4d0b074d
705,933
import torch


def get_bernoulli_sample(probs):
    """Conduct Bernoulli sampling according to a specific probability distribution.

    Args:
        probs: (torch.Tensor) A tensor in which each element denotes a
            probability of 1 in a Bernoulli distribution.

    Returns:
        A Tensor of binary samples (0 or 1) with the same shape as probs.
    """
    if torch.cuda.is_available():
        bernoulli_sample = torch.ceil(probs - torch.rand(probs.shape, device=torch.device('cuda')))
    else:
        bernoulli_sample = torch.ceil(probs - torch.rand(probs.shape))
    return bernoulli_sample
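A sketch of the endpoint behavior (added for illustration; assumes torch is installed, and puts probs on the GPU when one is present, since the function samples on CUDA in that case). Probabilities 0 and 1 are deterministic because torch.rand samples from [0, 1):

device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
probs = torch.tensor([0.0, 1.0, 0.5], device=device)
sample = get_bernoulli_sample(probs)
assert sample[0].item() == 0.0  # ceil(0 - r) == 0 for r in [0, 1)
assert sample[1].item() == 1.0  # ceil(1 - r) == 1 for r in [0, 1)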
14c45741d47f5eaff24893471425ddd4de7e2e4b
705,934
import subprocess
import locale


def parse_env_file(filename, pattern):
    """Source a shell script and extract variables from it."""
    # Use the shell to parse this so we can also read substitutions
    # like $() for example.
    env = {}
    command = 'source {}; set | grep -E "{}"'.format(filename, pattern)
    output = subprocess.check_output(['sh', '-c', command])
    output = output.decode(locale.getpreferredencoding())
    for line in output.splitlines():
        p1 = line.find('=')
        env[line[:p1]] = line[p1+1:]
    return env
d7a3a14bc163066f0232bf7811c397fbb594b45c
705,935
def CollectUniqueByOrderOfAppearance(dataset: list):
    """
    This method collects all unique entries in order of appearance and returns them as a list.

    :param dataset:list: dataset list
    """
    try:
        seen = set()
        seen_add = seen.add
        return [x for x in dataset if not (x in seen or seen_add(x))]
    except Exception as ex:
        template = "An exception of type {0} occurred in [ContentSupport.CollectUniqueByOrderOfAppearance]. Arguments:\n{1!r}"
        message = template.format(type(ex).__name__, ex.args)
        print(message)
e252d064bf0c525ec1c1781ca6dc915dbc9d46f0
705,936
import fcntl


def has_flock(fd):
    """
    Checks if fd has flock over it
    True if it is, False otherwise

    :param fd:
    :return:
    :rtype: bool
    """
    try:
        fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except BlockingIOError:
        return True
    else:
        # release the probe lock we just acquired so the check has no side effect
        fcntl.flock(fd, fcntl.LOCK_UN)
        return False
9ae997d06a12d73a659958bc2f0467ebdf0142b7
705,937
def ExtractCodeBySystem(codable_concept, system):
    """Extract code in codable_concept."""
    for coding in codable_concept.coding:
        if (coding.HasField('system') and coding.HasField('code')
                and coding.system.value == system):
            return coding.code.value
    return None
e672cb3d2c1d8d65e49d00539cdecf6ee03d1143
705,938
def record_or_not(record_mode, line, start_block, end_block):
    """Toggle record_mode when line contains the start or end block marker."""
    if not record_mode:
        if start_block in line:
            record_mode = True
    elif end_block in line:
        record_mode = False
    return record_mode
2b3952ab7fa3aa23ccbd712dee0aa06083b7b5f5
705,939
def id_test_data(value):
    """generate id"""
    return f"action={value.action_name} return={value.return_code}"
47649b7302ef2f3ad046fc1c7b3fc18da2687921
705,940
import json


def assertDict(s):
    """
    Assert that the input is a dictionary.
    """
    if isinstance(s, str):
        try:
            s = json.loads(s)
        except ValueError:
            raise AssertionError('String "{}" cannot be json-decoded.'.format(s))
    if not isinstance(s, dict):
        raise AssertionError('Variable "{}" is not a dictionary.'.format(s))
    return s
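Two checks (added for illustration), one for a JSON string input and one for a dict passed through unchanged:

assert assertDict('{"a": 1}') == {'a': 1}
assert assertDict({'a': 1}) == {'a': 1}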
302defb4e1eecc9a6171cda0401947e3251be585
705,942
import ast
from typing import Tuple
from typing import List
from typing import Any


def get_function_args(node: ast.FunctionDef) -> Tuple[List[Any], List[Any]]:
    """
    This function will process a function definition, extract all arguments
    used by the given function, and return all optional and non-optional args
    used by the function.

    Args:
        node: Function node containing function that needs to be analyzed

    Returns:
        (non_optional_args, optional_args): named function args
    """
    assert (
        type(node) == ast.FunctionDef
    ), "Incorrect node type. Expected ast.FunctionDef, got {}".format(type(node))
    total_args = len(node.args.args)
    default_args = len(node.args.defaults)
    optional_args = []
    non_optional_args = []
    # Handle positional args
    for i in range(total_args):
        if i + default_args < total_args:
            non_optional_args.append(node.args.args[i].arg)
        else:
            optional_args.append(node.args.args[i].arg)
    # Handle named args
    for arg in node.args.kwonlyargs:
        optional_args.append(arg.arg)
    return non_optional_args, optional_args
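A short demonstration (added here; the function `f` is a hypothetical example). Keyword-only arguments land in the optional list:

fn = ast.parse("def f(a, b, c=1, *, d=2):\n    pass\n").body[0]
non_opt, opt = get_function_args(fn)
assert non_opt == ['a', 'b']
assert opt == ['c', 'd']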
a4fe9dccedd5684050a7d5e7949e384dd4021035
705,943
import collections
import csv


def ParseMemCsv(f):
    """Compute summary stats for memory.

    vm5_peak_kib -> max(vm_peak_kib)   # over 5 second intervals.  Since it
    uses the kernel, it's accurate except for tasks that spike in their last
    4 seconds.

    vm5_mean_kib -> mean(vm_size_kib)  # over 5 second intervals
    """
    peak_by_pid = collections.defaultdict(list)
    size_by_pid = collections.defaultdict(list)

    # Parse columns we care about, by PID
    c = csv.reader(f)
    for i, row in enumerate(c):
        if i == 0:
            continue  # skip header
        # looks like timestamp, pid, then (rss, peak, size)
        _, pid, _, peak, size = row
        if peak != '':
            peak_by_pid[pid].append(int(peak))
        if size != '':
            size_by_pid[pid].append(int(size))

    mem_by_pid = {}

    # Now compute summaries
    pids = peak_by_pid.keys()
    for pid in pids:
        peaks = peak_by_pid[pid]
        vm5_peak_kib = max(peaks)

        sizes = size_by_pid[pid]
        vm5_mean_kib = sum(sizes) / len(sizes)

        mem_by_pid[pid] = (vm5_peak_kib, vm5_mean_kib)
    return mem_by_pid
5d10a0d0ac5ab3d3e99ff5fd4c9ca6cd0b74656b
705,944
def index_containing_substring(list_str, substring):
    """For a given list of strings finds the index of the element that
    contains the substring.

    Parameters
    ----------
    list_str: list of strings
    substring: substring

    Returns
    -------
    index: containing the substring or -1
    """
    for i, s in enumerate(list_str):
        if substring in s:
            return i
    return -1
2816899bc56f6b2c305192b23685d3e803b420df
705,945
def iatan2(y, x):
    """One coordinate must be zero"""
    if x == 0:
        return 90 if y > 0 else -90
    else:
        return 0 if x > 0 else 180
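iatan2 only covers the axis-aligned cases, as a few checks (added here) show:

assert iatan2(0, 5) == 0
assert iatan2(0, -5) == 180
assert iatan2(5, 0) == 90
assert iatan2(-5, 0) == -90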
a0b18b61d7ffadf864a94299bc4a3a0aacd7c65a
705,946
import torch


def fuse_bn_sequential(model):
    """
    This function takes a sequential block and fuses the batch normalization with convolution

    :param model: nn.Sequential. Source resnet model
    :return: nn.Sequential. Converted block
    """
    if not isinstance(model, torch.nn.Sequential):
        return model
    stack = []
    for m in model.children():
        if isinstance(m, torch.nn.BatchNorm2d):
            if isinstance(stack[-1], torch.nn.Conv2d):
                bn_st_dict = m.state_dict()
                conv_st_dict = stack[-1].state_dict()

                # BatchNorm params
                eps = m.eps
                mu = bn_st_dict['running_mean']
                var = bn_st_dict['running_var']
                gamma = bn_st_dict['weight']
                if 'bias' in bn_st_dict:
                    beta = bn_st_dict['bias']
                else:
                    beta = torch.zeros(gamma.size(0)).float().to(gamma.device)

                # Conv params
                W = conv_st_dict['weight']
                if 'bias' in conv_st_dict:
                    bias = conv_st_dict['bias']
                else:
                    bias = torch.zeros(W.size(0)).float().to(gamma.device)

                denom = torch.sqrt(var + eps)
                b = beta - gamma.mul(mu).div(denom)
                A = gamma.div(denom)
                bias *= A
                A = A.expand_as(W.transpose(0, -1)).transpose(0, -1)

                W.mul_(A)
                bias.add_(b)

                stack[-1].weight.data.copy_(W)
                if stack[-1].bias is None:
                    stack[-1].bias = torch.nn.Parameter(bias)
                else:
                    stack[-1].bias.data.copy_(bias)
        else:
            stack.append(m)
    if len(stack) > 1:
        return torch.nn.Sequential(*stack)
    else:
        return stack[0]
6d31cd2cd73e8dc91098b7f9cc7f70ce3b81a3b9
705,947
def isMultipleTagsInput(item):
    """
    Returns True if the argument datatype is not a column or a table, if it
    allows lists, and if it has no permitted values. This function is used to
    check whether the argument values have to be delimited by the null
    character (returns True) or not.

    :param item: Table argument.
    """
    return item.get('datatype', 'STRING') in ['STRING', 'DOUBLE', 'INTEGER', 'DRIVER', 'SQLEXPR', 'LONG']\
        and item.get('allowsLists', False)\
        and not item.get('permittedValues', [])
f7710902e27962fc8df55bc75be2d5d404144aeb
705,948
from typing import Tuple
from typing import Dict
from typing import List
import re


def clean_status_output(
    input: str,
) -> Tuple[bool, Dict[str, str], List[Dict[str, str]]]:
    # example input
    """
    # Health check:
    #     - dns: rename /etc/resolv.conf /etc/resolv.pre-tailscale-backup.conf: device or resource busy

    100.64.0.1   test_domain_1   omnet   linux   -
    100.64.0.2   test_network_1  omnet   linux   active; relay "syd", tx 1188 rx 1040
    """
    up = False
    peers: List[Dict[str, str]] = []
    host: Dict[str, str] = {}
    if "Tailscale is stopped." in input:
        return up, host, peers
    elif "unexpected state: NoState" in input:
        return up, host, peers

    count = 0
    for line in str(input).split("\n"):
        matches = re.match(r"^\d.+", line)
        if matches is not None:
            try:
                stat_parts = re.split(r"(\s+)", matches.string)
                entry = {}
                entry["ip"] = stat_parts[0]
                entry["hostname"] = stat_parts[2]
                entry["network"] = stat_parts[4]
                entry["os"] = stat_parts[6]

                connection_info_parts = matches.string.split(entry["os"])
                entry["connection_info"] = "n/a"
                connection_info = ""
                if len(connection_info_parts) > 1:
                    connection_info = connection_info_parts[1].strip()
                    entry["connection_info"] = connection_info

                entry["connection_status"] = "n/a"
                if "active" in connection_info:
                    entry["connection_status"] = "active"
                if "idle" in connection_info:
                    entry["connection_status"] = "idle"

                entry["connection_type"] = "n/a"
                if "relay" in connection_info:
                    entry["connection_type"] = "relay"
                if "direct" in connection_info:
                    entry["connection_type"] = "direct"

                if count == 0:
                    host = entry
                    count += 1
                    up = True
                else:
                    peers.append(entry)
            except Exception as e:
                print("Error parsing tailscale status output", e)
                pass
    return up, host, peers
bbf100514373595948b0691dff857deb5772f019
705,949
def calculate_n_inputs(inputs, config_dict):
    """
    Calculate the number of inputs for a particular model.
    """
    input_size = 0
    for input_name in inputs:
        if input_name == 'action':
            input_size += config_dict['prior_args']['n_variables']
        elif input_name == 'state':
            input_size += config_dict['misc_args']['state_size']
        elif input_name == 'reward':
            input_size += 1
        elif input_name in ['params', 'grads']:
            if config_dict['approx_post_args']['constant_scale']:
                input_size += config_dict['prior_args']['n_variables']
            else:
                input_size += 2 * config_dict['prior_args']['n_variables']
    return input_size
78d750ff4744d872d696dcb454933c868b0ba41e
705,950
def chromosome_to_smiles():
    """Wrapper function for simplicity."""

    def sc2smi(chromosome):
        """Generate a SMILES string from a list of SMILES characters. To be customized."""
        silyl = "([Si]([C])([C])([C]))"
        core = chromosome[0]
        phosphine_1 = (
            "(P(" + chromosome[1] + ")(" + chromosome[2] + ")(" + chromosome[3] + "))"
        )
        phosphine_2 = (
            "(P(" + chromosome[4] + ")(" + chromosome[5] + ")(" + chromosome[6] + "))"
        )
        smiles = "{0}{1}{2}{3}".format(core, phosphine_1, phosphine_2, silyl)
        return smiles

    return sc2smi
793995484c46295977f1d312c4fa11f69bca6c84
705,951
def command_result_processor_parameter_required(command_line_parameter):
    """
    Command result message processor if a parameter stays unsatisfied.

    Parameters
    ----------
    command_line_parameter : ``CommandLineParameter``
        Respective command parameter.

    Returns
    -------
    message : `str`
    """
    message_parts = []
    message_parts.append('Parameter: ')
    message_parts.append(repr(command_line_parameter.name))
    message_parts.append(' is required.\n')
    return ''.join(message_parts)
fed1b7af60018cb5638e021365ae754477b7a241
705,952
def extract_file_from_zip(zipfile, filename):
    """
    Returns the compressed file `filename` from `zipfile`.
    """
    raise NotImplementedError()
dc7b1e5a196a019d1fd2274155e0404b03b09702
705,953
def all_not_none(*args):
    """Shorthand function for ``all(x is not None for x in args)``.

    Returns True if all `*args` are not None, otherwise False.
    """
    return all(x is not None for x in args)
2d063f39e253a78b28be6857df08d8f386d8eb4a
705,954
def get_for_tag(app_name):
    """
    Returns the customized `for` tag used to list records in the list.html template.

    :param app_name: Name of the app being created
    :type app_name: str
    """
    return "{% for " + app_name + " in " + app_name + "s %}"
12399a148262893047bf21c20e784bfb33373c29
705,955
def _reloadFn(*args):
    """Placeholder callback function for :func:`_handleSIGHUP`."""
    return True
261a54f52e4e448671b8625dae4fbc67116bd546
705,956
def whats_the_meaning_of_life(n_cores=23):
    """Answers the question about the meaning of life.

    You don't even have to ask the question, it will figure it out for you.
    Don't use more cores than available to mankind.

    Parameters
    ----------
    n_cores: int [default: 23]
        The number of CPU cores to use.

    Returns
    -------
    int
        The type of the expected answer is of course an integer.
    """
    return 42
9b42257161ad3063bd7d8faddb6e385aa5586bf0
705,957
def int_validator(inp, ifallowed):
    """
    Test whether only (positive) integers are being keyed into a widget.
    Call signature: %S %P
    """
    if len(ifallowed) > 10:
        return False
    try:
        return int(inp) >= 0
    except ValueError:
        return False
ee433a6365a0aad58cab0cd59fa05e132b669053
705,958
import logging

import requests


def scrape_page(url):
    """
    scrape page by url and return its html

    :param url: page url
    :return: html of page
    """
    logging.info('scraping %s...', url)
    try:
        response = requests.get(url)
        if response.status_code == 200:
            return response.text
        logging.error('get invalid status code %s while scraping %s', response.status_code, url)
    except requests.RequestException:
        logging.error('error occurred while scraping %s', url, exc_info=True)
a09eb79ce6abe25e4eb740dcfeb7a4debfca0b88
705,959
def is_one_line_function_declaration_line(line: str) -> bool:  # pylint:disable=invalid-name
    """
    Check if line contains a one-line function declaration.
    """
    # parenthesized so the line must contain 'def ' and '(' plus either ending;
    # the unparenthesized original matched any line containing ') ->'
    return 'def ' in line and '(' in line and ('):' in line or ') ->' in line)
e402cbbedc587ab0d572dfe6c074aadef6980658
705,960
def get_all_applications(user, timeslot):
    """
    Get a user's applications for this timeslot

    :param user: user to get applications for
    :param timeslot: timeslot to get the applications.
    :return:
    """
    return user.applications.filter(Proposal__TimeSlot=timeslot)
40aec747174fa4a3ce81fe2a3a5eee599c81643a
705,961
import importlib


def get_dataset(cfg, designation):
    """
    Return a Dataset for the given designation ('train', 'valid', 'test').
    """
    dataset = importlib.import_module('.' + cfg['dataset'], __package__)
    return dataset.create(cfg, designation)
3f872d6407110cf735968ad6d4939b40fec9167d
705,962
def RestrictDictValues(aDict, restrictSet):
    """Return a dict which has the mappings from the original dict
    only for values in the given set"""
    return dict(item for item in aDict.items() if item[1] in restrictSet)
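For example (added here), restricting a dict to entries whose values are in the given set:

assert RestrictDictValues({'a': 1, 'b': 2, 'c': 1}, {1}) == {'a': 1, 'c': 1}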
4333c40a38ad3bce326f94c27b4ffd7dc24ae19c
705,963
def timeRangeContainsRange(event1Start, event2Start, event1End, event2End):
    """
    Returns true if one set of times starts and ends within another set of times

    @param event1Start: datetime
    @param event2Start: datetime
    @param event1End: datetime
    @param event2End: datetime
    @return: boolean
    """
    if event2Start <= event1Start and event2End >= event1End:
        return True
    elif event1Start <= event2Start and event1End >= event2End:
        return True
    else:
        return False
05d25969b1f97f2f7015c9ce9bafbffcb931cb9b
705,964
def abs_length_diff(trg, pred):
    """Computes absolute length difference between a target sequence and a
    predicted sequence

    Args:
        - trg (str): reference
        - pred (str): generated output

    Returns:
        - absolute length difference (int)
    """
    trg_length = len(trg.split(' '))
    pred_length = len(pred.split(' '))
    return abs(trg_length - pred_length)
b5baf53609b65aa1ef3b1f142e965fa0606b3136
705,966
import re


def parse_msig_storage(storage: str):
    """Parse the storage of a multisig contract to get its counter (as a
    number), threshold (as a number), and the keys of the signers (as a
    Micheline sequence in a string)."""
    # put everything on a single line
    storage = ' '.join(storage.split('\n'))
    storage_regexp = r'Pair\s+?([0-9]+)\s+?([0-9]+)\s+?(.*)\s*'
    match = re.search(storage_regexp, storage)
    assert match is not None
    return {
        'counter': int(match[1]),
        'threshold': int(match[2]),
        'keys': match[3],
    }
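A sketch with a fabricated storage string (the key below is a placeholder, not real multisig data):

storage = 'Pair 3 2\n     { "edpk-placeholder-key" }'
parsed = parse_msig_storage(storage)
assert parsed['counter'] == 3
assert parsed['threshold'] == 2
assert 'edpk-placeholder-key' in parsed['keys']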
6e04091721177cdd3d40b86717eb86ebbb92a8ff
705,967
from functools import reduce
from operator import add


def assemble_docstring(parsed, sig=None):
    """
    Assemble a docstring from an OrderedDict as returned by
    :meth:`nd.utils.parse_docstring()`

    Parameters
    ----------
    parsed : OrderedDict
        A parsed docstring as obtained by ``nd.utils.parse_docstring()``.
    sig : function signature, optional
        If provided, the parameters in the docstring will be ordered
        according to the parameter order in the function signature.

    Returns
    -------
    str
        The assembled docstring.
    """
    parsed = parsed.copy()
    indent = parsed.pop('indent')
    pad = ' '*indent

    # Sort 'Parameters' section according to signature
    if sig is not None and 'Parameters' in parsed:
        order = tuple(sig.parameters.keys())

        def sort_index(p):
            key = p[0].split(':')[0].strip(' *')
            if key == '':
                return 9999
            return order.index(key)

        parsed['Parameters'] = sorted(parsed['Parameters'], key=sort_index)

    d = []
    for k, v in parsed.items():
        if isinstance(v[0], list):
            flat_v = reduce(add, v)
        else:
            flat_v = v
        if k is not None:
            d.extend(['', pad + k, pad + '-'*len(k)])
        d.extend([(pad + l).rstrip() for l in flat_v])
    return '\n'.join(d)
90553c468a2b113d3f26720128e384b0444d5c93
705,968
import numpy


def padArray(ori_array, pad_size):
    """
    Pads out an array to a larger size.

    ori_array - A 2D numpy array.
    pad_size - The number of elements to add to each of the "sides" of the array.

    Returns the padded 2D numpy array.
    """
    if (pad_size > 0):
        [x_size, y_size] = ori_array.shape
        lg_array = numpy.ones((x_size+2*pad_size, y_size+2*pad_size))
        lg_array[pad_size:(x_size+pad_size), pad_size:(y_size+pad_size)] = ori_array.astype(numpy.float64)
        lg_array[0:pad_size, :] = numpy.flipud(lg_array[pad_size:2*pad_size, :])
        lg_array[(x_size+pad_size):(x_size+2*pad_size), :] = numpy.flipud(lg_array[x_size:(x_size+pad_size), :])
        lg_array[:, 0:pad_size] = numpy.fliplr(lg_array[:, pad_size:2*pad_size])
        lg_array[:, (y_size+pad_size):(y_size+2*pad_size)] = numpy.fliplr(lg_array[:, y_size:(y_size+pad_size)])
        return lg_array
    else:
        return ori_array
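A quick shape check (added for illustration; assumes numpy is installed). The original array should sit in the centre of the padded one:

a = numpy.arange(4.0).reshape(2, 2)
padded = padArray(a, 1)
assert padded.shape == (4, 4)
assert (padded[1:3, 1:3] == a).all()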
28fac7ccb8fc08c3ac7cf3104fed558128003750
705,969
import torch


def log_cumsum(probs, dim=1, eps=1e-8):
    """Calculate log of inclusive cumsum."""
    return torch.log(torch.cumsum(probs, dim=dim) + eps)
7f1ab77fd9909037c7b89600c531173dab80c11e
705,970
def race_from_string(str):
    """Convert race to one of ['white', 'black', 'other', None]."""
    race_dict = {
        "White/Caucasian": 'white',
        "Black/African American": 'black',
        "Unknown": None,
        "": None,
    }
    return race_dict.get(str, 'other')
1d38469537c3f5f6a4a42712f5ec1dbd26a471bd
705,971
def suspend_supplier_services(client, logger, framework_slug, supplier_id, framework_info, dry_run):
    """
    The supplier ID list should have been flagged by CCS as requiring action, but
    double check that the supplier:
     - has some services on the framework
     - has `agreementReturned: false`
     - has not `agreementStatus: on-hold`

    :param client: API client instance
    :param framework_info: JSON
    :param dry_run: don't suspend if True
    :return: suspended_service_count
    :rtype: int
    """
    suspended_service_count = 0
    # Ignore any 'private' services that the suppliers have removed themselves
    new_service_status, old_service_status = 'disabled', 'published'

    if not framework_info['frameworkInterest']['onFramework']:
        logger.error(f'Supplier {supplier_id} is not on the framework.')
        return suspended_service_count
    if framework_info['frameworkInterest']['agreementReturned']:
        logger.error(f'Supplier {supplier_id} has returned their framework agreement.')
        return suspended_service_count
    if framework_info['frameworkInterest']['agreementStatus'] == 'on-hold':
        logger.error(f"Supplier {supplier_id}'s framework agreement is on hold.")
        return suspended_service_count

    # Find the supplier's non-private services on this framework
    services = client.find_services(
        supplier_id=supplier_id,
        framework=framework_slug,
        status=old_service_status
    )
    if not services['services']:
        logger.error(f'Supplier {supplier_id} has no {old_service_status} services on the framework.')
        return suspended_service_count

    # Suspend all services for each supplier (the API will de-index the services from search results)
    logger.info(
        f"Setting {services['meta']['total']} services to '{new_service_status}' for supplier {supplier_id}."
    )
    for service in services['services']:
        if dry_run:
            logger.info(f"[DRY RUN] Would suspend service {service['id']} for supplier {supplier_id}")
        else:
            client.update_service_status(service['id'], new_service_status, "Suspend services script")
            suspended_service_count += 1

    # Return suspended service count (i.e. if > 0, some emails need to be sent)
    return suspended_service_count
6442bf7f287126c6e1fe445fa9bca1ccde4d142f
705,972
from typing import List


def normalize(value: str) -> str:
    """Normalize a string by removing '-' and capitalizing the following character"""
    char_list: List[str] = list(value)
    length: int = len(char_list)
    for i in range(1, length):
        if char_list[i - 1] in ['-']:
            char_list[i] = char_list[i].upper()
    return ''.join(char_list).replace('-', '')
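For instance (added here), normalize turns kebab-case into camelCase:

assert normalize('foo-bar-baz') == 'fooBarBaz'
assert normalize('plain') == 'plain'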
52c1c8b5e950347cf63ed15d1efde47046b07873
705,973
def fitness(member):
    """Computes the fitness of a species member.
    http://bit.ly/ui-lab5-dobrota-graf"""
    if member < 0 or member >= 1024:
        return -1
    elif member >= 0 and member < 30:
        return 60.0
    elif member >= 30 and member < 90:
        return member + 30.0
    elif member >= 90 and member < 120:
        return 120.0
    elif member >= 120 and member < 210:
        return -0.83333 * member + 220
    elif member >= 210 and member < 270:
        return 1.75 * member - 322.5
    elif member >= 270 and member < 300:
        return 150.0
    elif member >= 300 and member < 360:
        return 2.0 * member - 450
    elif member >= 360 and member < 510:
        return -1.8 * member + 918
    elif member >= 510 and member < 630:
        return 1.5 * member - 765
    elif member >= 630 and member < 720:
        return -1.33333 * member + 1020
    elif member >= 720 and member < 750:
        return 60.0
    elif member >= 750 and member < 870:
        return 1.5 * member - 1065
    elif member >= 870 and member < 960:
        return -2.66667 * member + 2560
    else:
        return 0
81e8bd12da458f0c4f2629e3781c2512c436c577
705,974
def next_power_of_two(v: int):
    """
    returns x | x == 2**i and x >= v
    """
    v -= 1
    v |= v >> 1
    v |= v >> 2
    v |= v >> 4
    v |= v >> 8
    v |= v >> 16
    v += 1
    return v
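A few boundary checks (added here) of the bit-smearing trick:

assert next_power_of_two(1) == 1
assert next_power_of_two(33) == 64
assert next_power_of_two(64) == 64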
9c62840e2dc2cd44666328c32c48e5867523ba6c
705,975
def add_token(token_sequence: str, tokens: str) -> str:
    """Adds the tokens from 'tokens' that are not already contained in
    `token_sequence` to the end of `token_sequence`::

        >>> add_token('', 'italic')
        'italic'
        >>> add_token('bold italic', 'large')
        'bold italic large'
        >>> add_token('bold italic', 'bold')
        'bold italic'
        >>> add_token('red thin', 'stroked red')
        'red thin stroked'
    """
    for tk in tokens.split(' '):
        if tk and token_sequence.find(tk) < 0:
            token_sequence += ' ' + tk
    return token_sequence.lstrip()
2506dd00b55e9d842dc90c40578e0c21d942e73e
705,976
import argparse


def add_arguments():
    """
    Function to parse the command line arguments

    Options:
    [-f]: Name of text file with corpus
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--file', type=str, help="The file with the text.")
    return parser.parse_args()
c5fe0be82cf0eb2941e08fc6744bb1b7517bba78
705,977
def process_h5_file(h5_file):
    """Do the processing of what fields you'll use here.

    For example, to get the artist familiarity, refer to:
    https://github.com/tbertinmahieux/MSongsDB/blob/master/PythonSrc/hdf5_getters.py

    So we see that it does
        h5.root.metadata.songs.cols.artist_familiarity[songidx]
    and it would translate to:
        num_songs = len(file['metadata']['songs'])
        file['metadata']['songs'][:num_songs]['artist_familiarity']
    Since there is one song per file, it simplifies to:
        file['metadata']['songs'][:1]['artist_familiarity']

    I recommend downloading one file, opening it with h5py, and explore/practice

    To see the datatype and shape:
    http://millionsongdataset.com/pages/field-list/
    http://millionsongdataset.com/pages/example-track-description/
    """
    return h5_file['metadata']['songs'][:1]['artist_familiarity'][0]
de664a3e1ea88c8c8cd06b210e4c6b756a4d02a6
705,978
def binary_str(num):
    """Return a binary string representation from the positive integer 'num'

    :type num: int
    :return:

    Examples:
    >>> binary_str(2)
    '10'
    >>> binary_str(5)
    '101'
    """
    # Store mod 2 operations results as '0' and '1'
    bnum = ''
    while num > 0:
        bnum = str(num & 0x1) + bnum
        num = num >> 1
    return bnum
dde400323fccb9370c67197f555d9c41c40084a6
705,979
def compute_phot_error(flux_variance, bg_phot, bg_method, ap_area, epadu=1.0):
    """Computes the flux errors using the DAOPHOT style computation

    Parameters
    ----------
    flux_variance : array
        flux values
    bg_phot : array
        background brightness values.
    bg_method : string
        background method
    ap_area : array
        the area of the aperture in square pixels
    epadu : float (optional)
        Gain in electrons per adu (only use if image units aren't e-).
        Default value is 1.0

    Returns
    -------
    flux_error : array
        an array of flux errors
    """
    bg_variance_terms = (ap_area * bg_phot['aperture_std'] ** 2.) * (1. + ap_area/bg_phot['aperture_area'])
    variance = flux_variance / epadu + bg_variance_terms
    flux_error = variance ** .5
    return flux_error
4470277ebc41cce0e2c8c41c2f03e3466473d749
705,980
def get_description(soup):
    """Extract the textual description of the vacancy."""
    non_branded = soup.find('div', {'data-qa': 'vacancy-description'})
    branded = soup.find('div', {'class': 'vacancy-section HH-VacancyBrandedDescription-DANGEROUS-HTML'})
    description = non_branded or branded
    return description.get_text()
82a254774be3eb55762d9964928429d168572a2a
705,981
def getStrVector(tarstr, cdict, clen=None):
    """
    Vectorize a string: each element of the vector holds the frequency of one
    character from the character set; the character set, and each character's
    index in the vector, are given by cdict.
    """
    if not clen:
        clen = len(cdict.keys())
    vec = [0]*clen
    for c in tarstr:
        vec[cdict[c]] += 1
        # vec[cdict[c]] = 1
    return vec
d23b54288d6a9cff2f3999c54f53a7f8e2d5d35f
705,982
def weighted_sequence_identity(a, b, weights, gaps='y'):
    """Compute the sequence identity between two sequences, weighting different positions differently

    The definition of sequence_identity is ambiguous as it depends on how gaps
    are treated, here defined by the *gaps* argument. For details and
    examples, see `this page
    <https://pyaln.readthedocs.io/en/latest/tutorial.html#sequence-identity>`_

    Parameters
    ----------
    a : str
        first sequence, with gaps encoded as "-"
    b : str
        second sequence, with gaps encoded as "-"
    weights : list of float
        list of weights. Any iterable with the same length as the two input
        sequences (including gaps) is accepted. The final score is divided by
        their sum (except for positions not considered, as defined by the
        gaps argument).
    gaps : str
        defines how to take into account gaps when comparing sequences pairwise. Possible values:

        - 'y' : gaps are considered and considered mismatches. Positions that are gaps in both sequences are ignored.
        - 'n' : gaps are not considered. Positions that are gaps in either sequences compared are ignored.
        - 't' : terminal gaps are trimmed. Terminal gap positions in either sequences are ignored, others are considered as in 'y'.
        - 'a' : gaps are considered as any other character; even gap-to-gap matches are scored as identities.

    Returns
    -------
    float
        sequence identity between the two sequences

    Examples
    --------
    >>> weighted_sequence_identity('ATGCA',
    ...                            'ATGCC', weights=[1, 1, 1, 1, 6])
    0.4

    >>> weighted_sequence_identity('ATGCA',
    ...                            'ATGCC', weights=[1, 1, 1, 1, 1])
    0.8

    Note
    ----
    To compute sequence identity efficiently among many sequences, use
    :func:`~pyaln.Alignment.score_similarity` instead.

    See also
    --------
    pyaln.Alignment.score_similarity, weighted_sequence_identity
    """
    if len(a) != len(b):
        raise IndexError('sequence_identity ERROR sequences do not have the same length')
    if len(weights) != len(a):
        raise IndexError('sequence_identity ERROR weights must be the same length as sequences')

    if gaps == 'y':
        pos_to_remove = [i for i in range(len(a)) if a[i] == '-' and b[i] == '-']
    elif gaps == 'n':
        pos_to_remove = [i for i in range(len(a)) if a[i] == '-' or b[i] == '-']
    elif gaps == 't':
        pos_to_remove = [i for i in range(len(a)) if a[i] == '-' and b[i] == '-']
        for s in [a, b]:
            for i, c in enumerate(s):
                if c == '-':
                    pos_to_remove.append(i)
                else:
                    break
            for i, c in reversed(list(enumerate(s))):
                if c == '-':
                    pos_to_remove.append(i)
                else:
                    break
    elif gaps == 'a':
        total_weight = sum(weights)
        count_identical = sum([int(ca == b[i])*weights[i] for i, ca in enumerate(a)])
        return count_identical/total_weight if total_weight else 0.0
    else:
        raise Exception('sequence_identity ERROR gaps argument must be one of {a, y, n, t}')

    exclude_pos = set(pos_to_remove)
    actual_weights = [w for i, w in enumerate(weights) if not i in exclude_pos]
    total_weight = sum(actual_weights)
    count_identical = sum([int(ca == b[i] and ca != '-')*weights[i]
                           for i, ca in enumerate(a) if not i in exclude_pos])
    return count_identical/total_weight if total_weight else 0.0
8face090454e984d0d4b9ea5fe78b6600a6e6b03
705,983
def parseText(text1, nlp):
    """Run the Spacy parser on the input text that is converted to unicode."""
    doc = nlp(text1)
    return doc
99d6a585358a700f8fc48c5dc4fc761a03ab42a7
705,984
import os


def read_rc(rcpath):
    """Retrieve color values from the rc file.

    Arguments:
        rcpath (str): path to the rc file.

    Returns:
        3-tuple of integers representing R,G,B.
    """
    if not os.path.exists(rcpath):
        return None
    with open(rcpath) as rc:
        colors = [int(ln) for ln in rc.readlines() if ln.strip()[0] in '1234567890']
    if len(colors) != 3:
        return None
    return colors
43dae96c2115b0648774cf682209af14b9b42638
705,985
import socket


def is_port_open(host, port, timeout=5):
    """
    verifies if a port is open in a remote host

    :param host: IP of the remote host
    :type host: str
    :param port: port to check
    :type port: int
    :param timeout: timeout max to check
    :type timeout: int
    :return: True if the port is open
    :rtype: bool
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(timeout)
    result = sock.connect_ex((host, port))
    sock.close()
    # connect_ex returns 0 on success; '== 0' rather than the unreliable 'is 0'
    return result == 0
1805727a3bb007cd5686475de686cf7bceac83a1
705,986
from multiprocessing import cpu_count


def use_processors(n_processes):
    """
    This routine clamps the requested number of processes to the number of
    processors available in your machine.
    """
    available_processors = cpu_count()
    n_processes = n_processes % (available_processors + 1)
    if n_processes == 0:
        n_processes = 1
        print('WARNING: Found n_processes = 0. Falling back to default single-threaded execution (n_processes = 1).')
    return n_processes
73877393aac6b4da68fb0216bf046601ce1fa99e
705,987
def reversedict(dct):
    """
    Reverse the {key: val} in dct to {val: key}
    """
    newmap = {}
    for (key, val) in dct.items():
        newmap[val] = key
    return newmap
f7a5a102546270a2e6aa7fb52d4fa6dd5e826753
705,988
import re


def commonIntegerPredicate(field):
    """return any integers"""
    return tuple(re.findall(r"\d+", field))
955dc61fa4293f21c707b538ea218b15d5a95fb2
705,990
def encode(text):
    """Encode text to a list of its UTF-8 byte values."""
    return [int(x) for x in text.encode('utf8')]
af51272d8edc25d46695ea3b35fd395ad26321b5
705,991
import socket


def _get_ip():
    """
    :return: This computer's default AF_INET IP address as a string
    """
    # find ip using answer with 75 votes
    # https://stackoverflow.com/questions/166506/finding-local-ip-addresses-using-pythons-stdlib
    ip = ''
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # apparently any IP will work
        sock.connect(('192.168.1.1', 1))
        ip = sock.getsockname()[0]
    except Exception as e:
        print(e)
        print('Error: Couldn\'t get IP! Make sure you are connected to a network.')
    finally:
        sock.close()
    return str(ip)
f39c961877a1ec026596a7ced01679411962fca4
705,992
def _get_value(cav, _type):
    """Get value of custom attribute item"""
    if _type == 'Map:Person':
        return cav["attribute_object"]["id"] \
            if cav.get("attribute_object") else None
    if _type == 'Checkbox':
        return cav["attribute_value"] == '1'
    return cav["attribute_value"]
c8210579cf8b2a29dffc1f28a6e204fc9f89f274
705,993
def get_message_id(update: dict, status_update: str) -> int:
    """Get the id of a message.

    Description: the function extracts the message id from a user's update.

    Parameters
    ----------
    update : dict
        new message from the bot
    status_update : str
        state of the message: edited ('edited_message') or new ('message')

    Returns
    -------
    int
        the message id
    """
    return update[status_update]['message_id']
9b299c94e322ad9cea92fd73cb9e7a55f3364caa
705,994
from bs4 import BeautifulSoup


def get_movie_names(url_data):
    """Get all the movies from the webpage"""
    soup = BeautifulSoup(url_data, 'html.parser')
    # Get all the lines from HTML that are a part of ul with class = 'ctlg-holder'
    data = soup.findAll('ul', attrs={'class': 'ctlg-holder'})
    movie_list = []
    for div in data:
        links = div.findAll('a')  # Choose all the lines with links
        for a in links:
            if a is not None and a != "#":
                movie_list.append(a.get('href', None))
    print("Movie Names Obtained")
    return movie_list
1cae6b0093f0e0ca9e361bdc207be9ea654e7c2b
705,995
def _gen_find(subseq, generator):
    """Returns the first position of `subseq` in the generator or -1 if there
    is no such position."""
    if isinstance(subseq, bytes):
        subseq = bytearray(subseq)
    subseq = list(subseq)
    pos = 0
    saved = []
    for c in generator:
        saved.append(c)
        if len(saved) > len(subseq):
            saved.pop(0)
            pos += 1
        if saved == subseq:
            return pos
    return -1
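Two sanity checks (added here); the function returns the 0-based start position of the match within the consumed stream:

assert _gen_find(b"lo", iter(b"hello")) == 3
assert _gen_find([9], iter([1, 2, 3])) == -1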
ec89e787a61d684e2a7d0c8c2d0fb9c89cf73ada
705,996
def all_permits(target_dynamo_table):
    """
    Simply return all data from DynamoDb Table

    :param target_dynamo_table:
    :return:
    """
    response = target_dynamo_table.scan()
    data = response['Items']
    while response.get('LastEvaluatedKey', False):
        response = target_dynamo_table.scan(ExclusiveStartKey=response['LastEvaluatedKey'])
        data.extend(response['Items'])
    return data
8efdaf4ff407d0e2ce8dd592eeac766b0ec2264b
705,997
def maximum_difference_sort_value(contributions):
    """
    Auxiliary function to sort the contributions for the compare_plot.
    Returns the value of the maximum difference between values in contributions[0].

    Parameters
    ----------
    contributions: list
        list containing 2 elements:
        a Numpy.ndarray of contributions of the indexes compared, and the features' names.

    Returns
    -------
    value_max_difference : float
        Value of the maximum difference contribution.
    """
    if len(contributions[0]) <= 1:
        max_difference = contributions[0][0]
    else:
        max_difference = max(
            [
                abs(contrib_i - contrib_j)
                for i, contrib_i in enumerate(contributions[0])
                for j, contrib_j in enumerate(contributions[0])
                if i <= j
            ]
        )
    return max_difference
cd7f66ec252199fb01b9891440d0f7da370c7b8e
705,998
def human_format(num):
    """
    :param num: A number to print in a nice readable way.
    :return: A string representing this number in a readable way (e.g. 1000 --> 1K).
    """
    magnitude = 0
    while abs(num) >= 1000:
        magnitude += 1
        num /= 1000.0
    return '%.2f%s' % (num, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])
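Example outputs (added here; note that sub-1000 values keep two decimal places):

assert human_format(999) == '999.00'
assert human_format(1234567) == '1.23M'
assert human_format(-2500) == '-2.50K'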
41e4f3823f756588c18b0fb926949a5aca9c6942
705,999
def post_equals_form(post, json_response):
    """Checks if the posts object is equal to the json object"""
    if post.title != json_response['title']:
        return False
    if post.deadline != json_response['deadline']:
        return False
    if post.details != json_response['details']:
        return False
    if post.category != json_response['category']:
        return False
    if post.preferred_contact != json_response['preferred_contact']:
        return False
    if post.zip_code != json_response['zip_code']:
        return False
    return True
965a533c7ebbb70001bcdcb0e143b617708807e3
706,000
def add(number1, number2):
    """
    This function adds two numbers.

    Arguments:
        number1 : first number to be passed
        number2 : second number to be passed

    Returns:
        number1 + number2, the sum of the two numbers

    Examples:
        >>> add(0,0)
        0
        >>> add(1,1)
        2
        >>> add(1.1,2.2)
        3.3000000000000003
    """
    return number1 + number2
5db1a461f65672d5fc1201a82657fada30220743
706,001
def calculate_timeout(start_point, end_point, planner):
    """
    Calculates the time limit between start_point and end_point considering
    a fixed speed of 5 km/hr.

    Args:
        start_point: initial position
        end_point: target_position
        planner: to get the shortest path between start_point and end_point

    Returns:
        time limit considering a fixed speed of 5 km/hr
    """
    path_distance = planner.get_shortest_path_distance(
        [start_point.location.x, start_point.location.y, 0.22],
        [start_point.orientation.x, start_point.orientation.y, 0.22],
        [end_point.location.x, end_point.location.y, end_point.location.z],
        [end_point.orientation.x, end_point.orientation.y, end_point.orientation.z])
    return ((path_distance / 1000.0) / 5.0) * 3600.0 + 10.0
cb7ae44df9b6a89d2e171046fa0bdfe3f81445c5
706,002
import os
import posixpath


def NormalizePath(path):
    """Returns a path normalized to how we write DEPS rules and compare paths."""
    return os.path.normcase(path).replace(os.path.sep, posixpath.sep)
e6a6c7a50176f6990841a48748e5951c4f40b8af
706,003
import torch


def load_model(model, model_path):
    """Load model from saved weights."""
    if hasattr(model, "module"):
        model.module.load_state_dict(torch.load(model_path, map_location="cpu"), strict=False)
    else:
        model.load_state_dict(torch.load(model_path, map_location="cpu"), strict=False)
    return model
0fbf34548474c4af89c25806f05d1e7d3170bbde
706,004
def get_file_size(filepath: str):
    """
    Not exactly sure how os.stat or os.path.getsize work, but they seem to
    get the total allocated size of the file and return that while the file
    is still copying. What we want is the actual file size written to disk
    during copying.

    With standard Windows file copying, we can just try open/close the file,
    and if that succeeds, the file is finished. With Kongsberg systems
    writing to disk, we can actually open and read the .all file as it
    copies, so the try/except is not good enough. This function will find the
    length of the actual readable data on disk.

    Parameters
    ----------
    filepath
        file path to a file being written

    Returns
    -------
    int
        file size in bytes
    """
    with open(filepath, "r") as file:
        # move pointer to the end of the file
        file.seek(0, 2)
        # retrieve the current position of the pointer
        # this will be the file's size in bytes
        size = file.tell()
    return size
6936a8227a96e3ebc4b1146f8363f092d232cafd
706,005
import json


def get_aws_regions_from_file(region_file):
    """
    Return the list of region names read from region_file.

    The format of region_file is as follows:
    {
        "regions": [
            "cn-north-1",
            "cn-northwest-1"
        ]
    }
    """
    with open(region_file) as r_file:
        region_data = json.load(r_file)
    return sorted(r for r in region_data.get("regions"))
639da8c6417295f97621f9fd5321d8499652b7b2
706,006
def from_string_to_bytes(a):
    """
    Based on project: https://github.com/chaeplin/dashmnb.
    """
    return a if isinstance(a, bytes) else bytes(a, 'utf-8')
e76509f1be8baf8df0bf3b7160615f9a9c04ff86
706,007
def split(x, divider):
    """Split a string.

    Parameters
    ----------
    x : any
        A str object to be split. Anything else is returned as is.
    divider : str
        Divider string.
    """
    if isinstance(x, str):
        return x.split(divider)
    return x
e77a162777d9bb13262e4686ba1cb9732ebab221
706,008
def _plat_idx_to_val(idx: int, edge: float = 0.5, FIO_IO_U_PLAT_BITS: int = 6, FIO_IO_U_PLAT_VAL: int = 64) -> float:
    """
    Taken from fio's stat.c for calculating the latency value of a bin
    from that bin's index.

        idx  : the value of the index into the histogram bins
        edge : fractional value in the range [0,1]** indicating how far into
               the bin we wish to compute the latency value of.

    ** edge = 0.0 and 1.0 computes the lower and upper latency bounds
       respectively of the given bin index.
    """
    # MSB <= (FIO_IO_U_PLAT_BITS-1), cannot be rounded off. Use
    # all bits of the sample as index
    if (idx < (FIO_IO_U_PLAT_VAL << 1)):
        return idx

    # Find the group and compute the minimum value of that group
    error_bits = (idx >> FIO_IO_U_PLAT_BITS) - 1
    base = 1 << (error_bits + FIO_IO_U_PLAT_BITS)

    # Find its bucket number of the group
    k = idx % FIO_IO_U_PLAT_VAL

    # Return the mean (if edge=0.5) of the range of the bucket
    return base + ((k + edge) * (1 << error_bits))
f992194492e031add3d14f0e145888303a5b4f06
706,009
def is_blank(value):
    """
    Returns True if ``value`` is ``None`` or an empty string.

    >>> is_blank("")
    True
    >>> is_blank(0)
    False
    >>> is_blank([])
    False
    """
    return value is None or value == ""
6a30f9f6726701a4b7a9df8957503111a5222558
706,010