content (string, lengths 35-416k) | sha1 (string, length 40) | id (int64, 0-710k) |
---|---|---|
def _convert_str_to_html(string):
    """Helper function to insert <br /> at line endings and to replace
    leading whitespace with non-breaking spaces."""
    if not string:
        return ""
    lines = string.splitlines()
    for index, line in enumerate(lines):
        for char in line:
            # Only the leading whitespace run is rewritten; the exact entity
            # expansions below are an assumption (the originals were lost in
            # extraction, leaving plain spaces behind).
            if char == '\t':
                line = line.replace(char, "&nbsp;" * 4, 1)
            elif char == ' ':
                line = line.replace(char, "&nbsp;", 1)
            else:
                break
        lines[index] = line
    return "<br />".join(lines) | b357d04f28a08f6b65d98ee381dcaef8969f6ff0 | 701,969 |
def _parseLinks(response, rel):
"""
Parses an HTTP response's ``Link`` headers of a given relation, according
to the Corelight API specification.
response (requests.Response): The response to parse the ``Link`` headers
out of.
rel (str): The link relation type to parse; all other relations are ignored.
Returns: A list of 2-tuples ``(url, title)``, where ``url`` is the URL
parsed out of a ``Link`` header and ``title`` is the link's optional
title (None if absent).
"""
links = response.headers.get("Link", None)
if not links:
return []
result = []
for l in links.split(","):
m = l.split(">", 1)
url = m[0].strip()[1:]
params = {}
for p in m[1].split(";"):
if not p:
continue
(k, v) = p.split("=")
params[k.strip().lower()] = v.strip()
if params.get("rel", None) != rel:
continue
title = params.get("title", None)
result.append((url, title))
return result | 73a4f2a7e981b335aa511e5e311ef7290a7695e3 | 701,970 |
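A minimal usage sketch with a stand-in for requests.Response; the unquoted parameter format is what this parser expects (real Corelight headers may differ):

class FakeResponse:  # hypothetical stand-in for requests.Response
    headers = {"Link": "</v1/items?page=2>; rel=next; title=Next page"}

print(_parseLinks(FakeResponse(), "next"))  # [('/v1/items?page=2', 'Next page')]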
def find_min_max(data):
"""Solution to exercise C-4.9.
Write a short recursive Python function that finds the minimum and
maximum values in a sequence without using any loops.
"""
n = len(data)
min_val = data[0]
max_val = data[0]
def recurse_minmax(idx):
nonlocal min_val, max_val
if idx == n:
return min_val, max_val # Base case
if data[idx] > max_val:
max_val = data[idx]
elif data[idx] < min_val:
min_val = data[idx]
return recurse_minmax(idx + 1)
return recurse_minmax(1) | b8f50d1dafa0f66ab61db8d4974c8d201cd4dc3c | 701,971 |
import torch
from functools import reduce
def mask_with_tokens(t, token_ids):
    """
    Return a boolean mask that is True wherever ``t`` equals any of ``token_ids``.
    """
    init_no_mask = torch.full_like(t, False, dtype=torch.bool)
    mask = reduce(lambda acc, el: acc | (t == el), token_ids, init_no_mask)
    return mask | f233f2e2111e02919ff3795820f024b0e5b850a1 | 701,972 |
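Usage sketch, assuming hypothetical PAD/CLS token ids 0 and 101:

t = torch.tensor([[101, 7, 0], [5, 0, 0]])
print(mask_with_tokens(t, [0, 101]))
# tensor([[ True, False,  True],
#         [False,  True,  True]])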
def isStandard(descriptorType):
"""
>>> isStandard(0x0a)
True
>>> isStandard(0x22)
False
>>> isStandard(0x61)
False
>>> isStandard(0x1a)
True
"""
# See USB Common Class Specification s. 3.11 for this field's structure
# Bit 7: reserved
# Bits 6..5: descriptor type
# STANDARD = 0
# CLASS = 1
# VENDOR = 2
# RESERVED = 3
# Bits 4..0: descriptor ID
if ((descriptorType & 0b01100000) >> 5) == 0:
return True
else:
return False | a4e002e58c07638cb14d1a13a27ac7232af140cf | 701,973 |
def cell_content_to_str(v):
"""
Convert the value of a cell to string
:param v: Value of a cell
:return: the cell value as a string (integers for numeric cells), or None if the cell is empty/falsy
"""
if v:
if isinstance(v, float) or isinstance(v, int):
return str(int(v))
else:
return str(v).strip()
else:
return None | b73746d735ed155d387512704b91fc92c0c93560 | 701,975 |
import os
def enter_file(file_type, file_path=""):
"""Request file path from user until path
exists.
Parameters
----------
file_type: str
Type of file to display in input line
file_path: str (optional)
Initial file path to try
"""
while not os.path.exists(file_path):
file_path = input(
f"\n{file_type} file not recognised: "
f"Re-enter file path: "
)
return file_path | 7470761a3b41eedd7a6d98f088b7a693bfd7428a | 701,976 |
def __hamming_distance_with_hash(dhash1, dhash2):
    """
    *Private method*
    Compute the Hamming distance between two dHash values.
    :param dhash1: str
    :param dhash2: str
    :return: Hamming distance (int)
    """
    difference = (int(dhash1, 16)) ^ (int(dhash2, 16))
    return bin(difference).count("1") | c8a28a3f20a037fe9e96bfe82cb522caf6600337 | 701,977 |
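Usage sketch with two hypothetical 16-digit dHash hex strings that differ in one bit:

print(__hamming_distance_with_hash("4f8e71a0b3c2d1e0", "4f8e71a0b3c2d1e1"))  # 1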
def _min_to_sec(minutes):
"""converts minutes to seconds,
assuming that input is a number representing minutes"""
return minutes*60 | dff3330038c7e8cd1abda2c8a0a4433979fedf58 | 701,978 |
def get_ip(request) -> str:
    """
    Get the IP address of the current request.
    :param request:
    :return:
    """
    if request.META.get('HTTP_X_FORWARDED_FOR', None):
        # X-Forwarded-For may hold a comma-separated proxy chain; take the client IP.
        ip = request.META['HTTP_X_FORWARDED_FOR'].split(',')[0].strip()
    else:
        ip = request.META['REMOTE_ADDR']
    return ip | eb084135920231aa176e6099d70ed3954b407070 | 701,979 |
def get_file_data_old(archivo):
"""
Separa del nombre del archivo y extrae nombre de Centroide (ctrd)
y cultivo (clt)
"""
diccionario = {}
listado = archivo.split('-')
diccionario['ctrd'] = listado[0]
condicion = 'TS(S2)' in listado[1] or\
'TS(TC)' in listado[1] or\
'TL' in listado[1] or\
'TS(TL)' in listado[1]
if '.' in listado[1]:
if 'P.txt' in listado[1]:
diccionario['clt'] = listado[1].split('.')[0]
elif condicion:
diccionario['clt'] = listado[1].split('.')[0]
else:
diccionario['clt'] = listado[1].split('.')[0] + listado[1].split('.')[1]
elif 'S1-V' in archivo:
diccionario['clt'] = listado[1] + '-' + listado[2].split('.')[0]
else:
diccionario['clt'] = 'QUE'
return diccionario | 4d02b5b77aaf0abfc768c898534493d3723ef54e | 701,980 |
from typing import Dict
def dict_squares(n: int) -> Dict[int, int]:
"""Generates a dictionary with numbers from 0 to n as keys
which are mapped to their squares using dictionary comprehension.
doctests:
>>> dict_squares(2)
{0: 0, 1: 1, 2: 4}
>>> dict_squares(5)
{0: 0, 1: 1, 2: 4, 3: 9, 4: 16, 5: 25}
"""
x = {i: i ** 2 for i in range(0, n + 1)}
return x | 4101d0256069c07da6c3d5a8d1e40783a4b26eea | 701,981 |
import argparse
def parse_args():
"""
Initializes command line arguments and
parses them on startup returning the parsed
args namespace.
"""
parser = argparse.ArgumentParser()
bot = parser.add_argument_group('Discord Bot')
bot.add_argument(
'--token', '-t', required=True, type=str,
help='The discord bot token')
bot.add_argument(
'--prefix', '-p', default='>', type=str,
help='The command prefix of the bot (def: \'>\')')
bot.add_argument(
'--allow-sudo', default=False, action='store_true',
help='Whether or not sudo command should be enabled')
rcon = parser.add_argument_group('RCON Connection')
rcon.add_argument(
'--rcon-address', '-raddr', default='localhost:25575', type=str,
help='The address of the RCON server (def: \'localhost:25575\')')
rcon.add_argument(
'--rcon-password', '-rpw', required=True, type=str,
help='The password of the RCON server')
rcon.add_argument(
'--rcon-encoding', default='utf-8', type=str,
help='The encoding to be used for RCON payloads')
rcon.add_argument(
'--rcon-fetch-freq', default=30, type=int,
help='The interval in seconds in which the server stats will be polled')
parser.add_argument(
'--log-level', '-l', default=20, type=int,
help='Set log level of the default logger (def: 20)')
parser.add_argument(
'--db-file', '-db', default='database.db', type=str,
help='Set database file location (def: database.db)')
return parser.parse_args() | 3f8ae0e284c9b917b76bac48a4f9bd254e76403d | 701,982 |
def wrap_coro(coro, unpack, *args, **kwargs):
""" building a coroutine receiving one argument and call it curried
with *args and **kwargs and unpack it (if unpack is set)
"""
if unpack:
async def _coro(value):
return await coro(*args, *value, **kwargs)
else:
async def _coro(value):
return await coro(*args, value, **kwargs)
return _coro | 2e1916f5f34be5878a3af64ea28b5ffb56f6b350 | 701,983 |
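Usage sketch showing both unpack modes (add3 is a hypothetical coroutine):

import asyncio

async def add3(a, b, c):
    return a + b + c

curried = wrap_coro(add3, True, 1)          # value is unpacked: add3(1, 2, 3)
print(asyncio.run(curried((2, 3))))         # 6
curried2 = wrap_coro(add3, False, 1, c=10)  # value passed as-is: add3(1, 2, c=10)
print(asyncio.run(curried2(2)))             # 13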
from typing import Any
import typing
def hint_is_specialized(hint: Any, target: Any) -> bool:
"""Checks if a type hint is a specialized version of target.
E.g., hint_is_specialized(ClassVar[int], ClassVar) is True.
isinstance will invoke type-checking, which this methods sidesteps.
Behavior is undefined for simple type hints that don't take a type
argument, like Any or a bare type.
"""
return typing.get_origin(hint) is target | b651fc05290de82ab5a5833d10ca68d6a96f2d7a | 701,984 |
def build_spc_queue(rxn_lst):
""" Build spc queue from the reaction lst for the drivers
:return spc_queue: all the species and corresponding models in rxn
:rtype: list[(species, model),...]
"""
if 'all' in rxn_lst:
# First check if rxn_lst is a bunch of species
spc_queue = rxn_lst['all']['species']
else:
# Build the list from expanding the reacs and prods
spc_queue = []
for rxn in rxn_lst:
model = rxn['model']
spc_queue.extend(((reac, model) for reac in rxn['reacs']))
spc_queue.extend(((prod, model) for prod in rxn['prods']))
return spc_queue | 0dbe4e2bc3db16dc5dc83a55f0f802d4dfae853f | 701,985 |
def patch_telomeres(bands_by_chr):
"""Account for special case with Drosophila melanogaster
"""
for chr in bands_by_chr:
first_band = bands_by_chr[chr][0]
start = first_band[1]
if start != '1':
stop = str(int(start) - 1)
pter_band = ['pter', '1', stop, '1', stop, 'gpos']
bands_by_chr[chr].insert(0, pter_band)
new_bands = {}
for chr in bands_by_chr:
new_bands[chr] = []
for band in bands_by_chr[chr]:
band.insert(0, 'q')
new_bands[chr].append(band)
bands_by_chr = new_bands
return bands_by_chr | 23057227c526bb3837fd0c02233ddac4671c6956 | 701,986 |
from typing import List
def smallest_positive_integer_not_in_array(arr: List[int]) -> int:
"""
[1..N] can cover everything from 1 to (N * (N+1) / 2)
"""
res = 1
for num in arr:
if num > res:
return res
else:
res += num
return res | 9d01e051c278beab40aefa6618a4a5f4934e451a | 701,987 |
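Usage sketch; note the function relies on the input being sorted ascending:

print(smallest_positive_integer_not_in_array([1, 2, 3, 10]))  # 7 (1..3 cover sums up to 6)
print(smallest_positive_integer_not_in_array([2, 3]))         # 1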
def module_enclosing_func(offset):
""" Test function to see if module-level enclosures are detected """
def module_closure_func(self):
"""
Actual closure function, should be reported as:
putil.tests.my_module.module_enclosing_func.module_closure_func
"""
self._exobj.add_exception(
exname='illegal_value',
extype=TypeError,
exmsg='Argument `value` is not valid'
)
return self._value1+offset
return module_closure_func | 399212d5cc04479639cdb5cacb50b167327f2445 | 701,988 |
def get_ini_conf(fname):
""" Very simple one-lined .ini file reader, with no error checking """
with open(fname, "r") as handle:
return {i.split("=")[0].strip(): i.split("=")[-1].strip() for i in handle.readlines() if i.strip()} | 180c3106bb40c26b6628ff19dcb2c233a7f6a8d7 | 701,989 |
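Usage sketch with a throwaway file (keys and values are made up):

with open("demo.ini", "w") as fh:
    fh.write("host = localhost\nport = 8080\n")
print(get_ini_conf("demo.ini"))  # {'host': 'localhost', 'port': '8080'}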
import socket
def whois(ip_address):
"""Whois client for Python"""
whois_ip = str(ip_address)
try:
query = socket.gethostbyname(whois_ip)
except Exception:
query = whois_ip
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("whois.ripe.net", 43))
s.send(query.encode("utf8") + b"\r\n")
answer = b""
while True:
d = s.recv(4096)
answer += d
if not d:
break
s.close()
ignore_tag = b"remarks:"
# ignore all lines starting with the ignore_tag
lines = [line for line in answer.split(b"\n") if not line or (line and not line.startswith(ignore_tag))] # noqa: E501
# remove empty lines at the bottom
for i in range(1, len(lines)):
if not lines[-i].strip():
del lines[-i]
else:
break
return b"\n".join(lines[3:]) | 7440f936ca866bc74ccd4e0d81bff56e46b82f60 | 701,990 |
import json
import logging
def SubtractHistogram(histogram_json, start_histogram_json):
"""Subtracts a previous histogram from a histogram. Both parameters are json
serializations of histograms."""
start_histogram = json.loads(start_histogram_json)
# It's ok if the start histogram is empty (we had no data, maybe even no
# histogram at all, at the start of the test).
if 'buckets' not in start_histogram:
return histogram_json
start_histogram_buckets = dict()
for b in start_histogram['buckets']:
start_histogram_buckets[b['low']] = b['count']
new_buckets = []
histogram = json.loads(histogram_json)
for b in histogram['buckets']:
new_bucket = b
low = b['low']
if low in start_histogram_buckets:
new_bucket['count'] = b['count'] - start_histogram_buckets[low]
if new_bucket['count'] < 0:
logging.error('Histogram subtraction error, starting histogram most '
'probably invalid.')
if new_bucket['count']:
new_buckets.append(new_bucket)
histogram['buckets'] = new_buckets
histogram['count'] -= start_histogram['count']
return json.dumps(histogram) | ac346f7d2e132b8577c957f89cd7fb6150207bf1 | 701,992 |
def _front_left_tire_pressure_supported(data):
"""Determine if front left tire pressure is supported."""
return data["status"]["tirePressure"]["frontLeftTirePressurePsi"] is not None | b59ab6f4a9b3d0801c1c5c8798e7da2fab0b580d | 701,993 |
def count_missing_doc_types(articles):
"""
:param articles: A PyMongo collection of articles
:return: int: Number of articles without a 'doc_type' property or having
it equal to the empty string ('')
"""
return articles.count_documents(
{"$or": [{"doc_type": {"$exists": False}}, {"doc_type": ""}]}
) | b0e734590c4b74572382e377f9cd861fa5162af7 | 701,994 |
import argparse
def parse_args():
""" Parses the arguments of the command line """
parser = argparse.ArgumentParser(
description="Checks a file for british and american spellings")
parser.add_argument('files', metavar="files", type=str, nargs='+',
help='file where to check the spellings')
return parser.parse_args() | 1ea6d692eedbd2c448138cf0adc3a85cf2a85283 | 701,995 |
from datetime import datetime
def now_int():
"""
Returns the current POSIX time as an integer.
:return: integer POSIX time
"""
now = datetime.utcnow() - datetime(1970, 1, 1)  # use UTC so the result is true POSIX time
return int(now.total_seconds()) | 3c70a3324b549d24aaac01f49a2ed7ef480a8eb7 | 701,996 |
def module_loaded(module):
"""
Checks if the specified kernel-module has been loaded.
:param module: Name of the module to check
:return: True if the module is loaded, False if not.
"""
with open("/proc/modules") as modules: return any(s.startswith(module) for s in modules)  # close the handle instead of leaking it | f09e719acba7f8e2aed59816d3b99bd9575edcfd | 701,998 |
def IsMonophyleticForTaxa(tree,
taxa,
support=None):
"""check if a tree is monophyletic for a list of taxa.
Arguments
---------
tree : :class:`Tree`
Tree to analyse
taxa : list
List of taxa
support : float
Minimum bootstrap support
Returns
-------
bool
"""
tree.root_with_outgroup(taxa)
if support:
n = tree.is_monophyletic(taxa)
if n == -1:
return False
return tree.node(tree.node(tree.root).succ[0]).data.support >= support
else:
return tree.is_monophyletic(taxa) != -1 | fcb0066c4083183cc7b81195a0845897d95b1cde | 701,999 |
def year_range(entry):
"""Show an interval of employment in years."""
val = ""
if entry.get("start_date") is None or entry["start_date"]["year"]["value"] is None:
val = "unknown"
else:
val = entry["start_date"]["year"]["value"]
val += "-"
if entry.get("end_date") is None or entry["end_date"]["year"]["value"] is None:
val += "present"
else:
val += entry["end_date"]["year"]["value"]
return val | 92f7f0bcb450303161b7f766148a9feac62f98d1 | 702,002 |
def extract_BIO_tagged_tokens(text, source_spans, tokenizer):
    """ Split the text into BIO-tagged tokens using the whitespace-free span markup """
    tokens_w_tags = []
    for span in source_spans:
        s, e, tag = span
        tokens = tokenizer(text[s:e])
        if not tokens:  # guard against spans that tokenize to nothing
            continue
        if tag == 'Other':
            tokens_w_tags += [(token, tag) for token in tokens]
        else:
            tokens_w_tags.append((tokens[0], 'B-' + tag))
            for token in tokens[1:]:
                tokens_w_tags.append((token, 'I-' + tag))
    return tokens_w_tags | 7b56295f36040b68a3ba7d6f8c817d9f9b4c5094 | 702,003 |
def format_duration(dur: float) -> str:
"""Formats duration (from minutes) into a readable format"""
if float(dur) >= 1.0:
return "{} min".format(int(dur))
else:
return "{} sec".format(int(round(dur * 60))) | 02393e051b751001af9c8092ff64ebcef7596d6f | 702,004 |
import struct
def unpack(structure, data):
"""
Unpack little endian hexlified binary string into a list.
"""
return struct.unpack('<' + structure, bytes.fromhex(data)) | 530cf57b74be1e171a6f0c7ba148bdf73e8a7612 | 702,005 |
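Usage sketch: two little-endian uint16 values from a hexlified string:

print(unpack('HH', '0100ff00'))  # (1, 255)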
def get_recall(indices, targets):
    """ Calculates the recall score for the given predictions and targets
    Args:
        indices (Bxk): torch.LongTensor. top-k indices predicted by the model.
        targets (B): torch.LongTensor. actual target indices.
    Returns:
        recall (float): the recall score
    """
    targets = targets.view(-1, 1).expand_as(indices)  # (Bxk)
    hits = (targets == indices).nonzero()
    if len(hits) == 0:
        return 0
    n_hits = hits.size(0)  # reuse hits instead of recomputing nonzero()
    recall = n_hits / targets.size(0)
    return recall | 63f4d7f36f63d3110c33989b03f264e1fa4aa4ff | 702,007 |
def get_collection_no(row):
"""Get the collection number from an expedition row."""
if row.get('collector_number'):
return row.collector_number
num = row.get('collector_number_numeric_only', '')
verb = row.get('collector_number_verbatim', '')
if verb and len(num) < 2:
return row.collector_number_verbatim
return row.collector_number_numeric_only | a3e92e24a6a5a95651b7ccde183ecc6b083a649a | 702,008 |
def _get_versioned_config(config, version = ""):
"""select version from config
Args:
config: config
version: specified version, default is "".
Returns:
updated config with specified version
"""
versioned_config = {}
versioned_config.update(config)
used_version = config["used_version"]
if version and version in config["versions"]:
used_version = version
versioned_config.update(config["versions"][used_version])
if used_version.startswith("heads/"):
versioned_config["branch"] = used_version.split("/")[1]
elif used_version.startswith("tags/"):
versioned_config["tag"] = used_version.split("/")[1]
versioned_config.pop("versions")
versioned_config.pop("used_version")
return versioned_config | 70528e14148358613d2561c90e741b3f49569136 | 702,009 |
import configparser
from typing import Union
def _prompt_for_option_name (ARG_config_object: configparser.ConfigParser, ARG_section: str) -> Union[str, None]:
"""Prompts the user to enter a valid option name. Checks that option name exists.
Parameters
----------
ARG_config_object : configparser.ConfigParser
The settings, which have been read from `SETTINGS_FILE` by configparser and stored in this configparser.ConfigParser object.
ARG_section : str
A valid section name in the config object (see above).
Returns
-------
str or None
If a string, it's an option name that exists in the given section and config object. If None, the user wishes to exit whatever dialog called this function.
"""
print ()
print (f"SELECTED SECTION: {ARG_section}")
print ("Type the option name of the setting you would like to change, or")
_input = input ("leave blank to exit: ")
_input = _input.strip()
if _input == "":
return None
option_name = _input.lower()
if ARG_config_object.has_option (ARG_section, option_name) is False:
print (f"Sorry, {option_name} is not a valid option name.")
recursive_result = _prompt_for_option_name(ARG_config_object , ARG_section)
return recursive_result
else:
return option_name | b9aea1ba8a19d0c3a104a4e661a4043e9ad33889 | 702,010 |
def find_direct_conflicts(pull_ops, unversioned_ops):
"""
Detect conflicts where there's both unversioned and pulled
operations, update or delete ones, referering to the same tracked
object. This procedure relies on the uniqueness of the primary
keys through time.
"""
return [
(pull_op, local_op)
for pull_op in pull_ops
if pull_op.command == 'u' or pull_op.command == 'd'
for local_op in unversioned_ops
if local_op.command == 'u' or local_op.command == 'd'
if pull_op.row_id == local_op.row_id
if pull_op.content_type_id == local_op.content_type_id] | 5832a41b81cffd7e5c7d1f79472f9c44eaa3127a | 702,011 |
def unknown_id_to_symbol(unknown_id, header="X"):
"""Get the symbol of unknown whose id is |unknown_id|.
:type unknown_id: int
:type header: str
:param unknown_id: The ID of the unknown.
:param header: The symbol header.
:rtype : str
:return: A string that contains the symbol.
"""
# If the |unknown_id| is zero, just return |header| + "a".
if unknown_id == 0:
return header + "a"
# Initialize alphabet table.
ch_table = "abcdefghijklmnopqrstuvwxyz"
ch_table_len = len(ch_table)
# Convert decimal to 26 ary.
cur_id = unknown_id
r = ""
while cur_id != 0:
r = ch_table[cur_id % ch_table_len] + r
cur_id = int(cur_id / ch_table_len)
# Return the converted symbol.
return header + r | 53081447eb0c5daf70d1af936337b35bffe4caf0 | 702,012 |
import subprocess
def _get_git_revision_hash():
""" ref: https://stackoverflow.com/questions/14989858/get-the-current-git-hash-in-a-python-script """
return subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode('ascii').strip() | 368d8873df7b06ecdc5cbddb04cfe7fa57e553a2 | 702,013 |
import re
def markdown_to_doxygen(string):
"""Markdown to Doxygen equations"""
long_equations = re.sub(
r"(?<!\\)\$\$(.*?)(?<!\\)\$\$", r"\\f[\g<1>\\f]", string, flags=re.DOTALL
)
inline_equations = re.sub(r"(?<!(\\|\$))\$(?!\$)", r"\\f$", long_equations)
return inline_equations | 2cae07ccb661ef22fab518d4fae4a0cc22868d84 | 702,014 |
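Usage sketch:

print(markdown_to_doxygen("inline $a+b$ and display $$E=mc^2$$"))
# inline \f$a+b\f$ and display \f[E=mc^2\f]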
import click
def _get_help_record(opt):
"""Re-implementation of click.Opt.get_help_record.
The variant of 'get_help_record' found in Click makes uses of slashes to
separate multiple opts, and formats option arguments using upper case. This
is not compatible with Sphinx's 'option' directive, which expects
comma-separated opts and option arguments surrounded by angle brackets [1].
[1] http://www.sphinx-doc.org/en/stable/domains.html#directive-option
"""
def _write_opts(opts):
rv, _ = click.formatting.join_options(opts)
if not opt.is_flag and not opt.count:
rv += ' <{}>'.format(opt.name)
return rv
rv = [_write_opts(opt.opts)]
if opt.secondary_opts:
rv.append(_write_opts(opt.secondary_opts))
help = opt.help or ''
extra = []
if opt.default is not None and opt.show_default:
extra.append(
'default: %s' % (', '.join('%s' % d for d in opt.default)
if isinstance(opt.default,
(list, tuple)) else opt.default, ))
if opt.required:
extra.append('required')
if extra:
help = '%s[%s]' % (help and help + ' ' or '', '; '.join(extra))
return ', '.join(rv), help | fb8e3f79e46cd046737de4d001357eafb9a1ef5c | 702,016 |
def init(input_mgr, user_data, logger):
"""Initialize the example source tool."""
# Get the selected value from the GUI and save it for later use in the user_data
user_data.val = float(input_mgr.workflow_config["Value"])
# Display info on the selected value
logger.display_info_msg(f"The value selected is {user_data.val}")
# Throw a warning if greater than 0.5
if user_data.val > 0.5:
logger.display_warn_msg("The value selected is greater than 0.5")
return True | dd922eea66b61e152675f9a27f6732cb8bd56209 | 702,017 |
from typing import List
def assert_single_whitespace_after_second_semicolon(docstring: List[str]) -> List[str]:
"""
Find the lines conaining prefixes = [":param", ":return", ":raises"].
For those lines make sure that there is only one whitespace after the second semicolon.
:param docstring: list of lines in docstring
:return: list of lines in docstring
"""
prefixes = [":param", ":return", ":raises"]
for i in range(len(docstring)):
line = docstring[i]
for prefix in prefixes:
index = line.find(prefix)
if index != -1:
index_of_second_semicolon = line.find(":", index + len(prefix))
if index_of_second_semicolon != -1:
line_after_second_semicolon = line[index_of_second_semicolon + 1 :]
while line_after_second_semicolon.startswith(" "):
line_after_second_semicolon = line_after_second_semicolon[1:]
if len(line_after_second_semicolon) > 1:
line_after_second_semicolon = (
" "
+ line_after_second_semicolon[0].upper()
+ line_after_second_semicolon[1:]
)
docstring[i] = (
line[: index_of_second_semicolon + 1]
+ line_after_second_semicolon
)
return docstring | 8e3f1a2f67782774e52b424e025e5a2cea1ebfca | 702,018 |
from typing import Dict
def is_graph_equal(lhs_workbench: Dict, rhs_workbench: Dict) -> bool:
"""Checks whether both workbench contain the same graph
Two graphs are the same when the same topology (i.e. nodes and edges)
and the ports at each node have same values/connections
"""
try:
if not set(rhs_workbench.keys()) == set(lhs_workbench.keys()):
raise ValueError()
for node_id, node in rhs_workbench.items():
# same nodes
if not all(
node.get(k) == lhs_workbench[node_id].get(k) for k in ["key", "version"]
):
raise ValueError()
# same connectivity (edges)
if not set(node.get("inputNodes")) == set(
lhs_workbench[node_id].get("inputNodes")
):
raise ValueError()
# same input values
for port_id, port in node.get("inputs", {}).items():
if port != lhs_workbench[node_id].get("inputs", {}).get(port_id):
raise ValueError()
except (ValueError, TypeError, AttributeError):
return False
return True | 02c327cfb364e01f206458a87d6c4561985b42d6 | 702,019 |
import copy
def combine(to_merge, extend_by):
"""Merge nested dictionaries."""
def _combine(to_merge, extend_by):
for key, value in extend_by.items():
if key in to_merge:
if isinstance(to_merge[key], dict):
_combine(to_merge[key], value)
else:
to_merge[key] = value
else:
to_merge[key] = value
to_merge = copy.deepcopy(to_merge)
_combine(to_merge, extend_by)
return to_merge | 69a5713e65bace724370c722155a2677cd50c317 | 702,020 |
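Usage sketch: the nested merge leaves both inputs untouched thanks to the deepcopy:

base = {"db": {"host": "localhost", "port": 5432}}
override = {"db": {"port": 6432}, "debug": True}
print(combine(base, override))  # {'db': {'host': 'localhost', 'port': 6432}, 'debug': True}
print(base)                     # unchanged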
def chr22XY(c):
"""Reformats chromosome to be of the form Chr1, ..., Chr22, ChrX, ChrY, etc.
Args:
c (str or int): A chromosome.
Returns:
str: The reformatted chromosome.
Examples:
>>> chr22XY('1')
'chr1'
>>> chr22XY(1)
'chr1'
>>> chr22XY('chr1')
'chr1'
>>> chr22XY(23)
'chrX'
>>> chr22XY(24)
'chrY'
>>> chr22XY("X")
'chrX'
>>> chr22XY("23")
'chrX'
>>> chr22XY("M")
'chrM'
"""
c = str(c)
if c[0:3] == 'chr':
c = c[3:]
if c == '23':
c = 'X'
if c == '24':
c = 'Y'
return 'chr' + c | 13677f728ce8221e9a6966951353deba703f3294 | 702,021 |
import re
def searchLiteral(a_string, patterns):
"""assumes a_string is a string, being searched in
assumes patterns is a list of strings, to be search for in a_string
returns a re span object, representing the found literal if it exists,
else None"""
results = []
for pattern in patterns:
regex = pattern
results.append(re.search(regex, a_string))
return results | fcbbbdb61474e441b5fe0a0d207b0c2c7a0b7da5 | 702,022 |
def get_tourn_golfer_id(tourn_golfers_list, tourn_id, golfer_id):
"""
Helper function to get the tourn_golfer_id
based on the specified tourn_id and golfer_id
"""
for tourn_golfer in tourn_golfers_list:
if tourn_golfer.get_golfer_id() == golfer_id:
if tourn_golfer.get_tourn_id() == tourn_id:
return tourn_golfer.get_tourn_golfer_id()
# tg not found - just return 0
return 0 | ead84142f91289a8786aa57da6c64cc512309ff8 | 702,025 |
import re
def verify_raw_google_hash_header(google_hash: str) -> bool:
"""Verify the format of the raw value of the "x-goog-hash" header.
Note: For now this method is used for tests only.
:param str google_hash: the raw value of the "x-goog-hash" header
:rtype: bool
"""
return bool(re.match(r'(crc32c=[A-Za-z0-9+/=]+),(md5=[A-Za-z0-9+/=]+)', google_hash)) | 187c903c23e0c860e983b2e9b70890a36823c63f | 702,026 |
def stack_operations():
"""Solution to exercise R-6.1.
What values are returned during the following series of stack operations,
if executed upon an initially empty stack? push(5), push(3), pop(),
push(2), push(8), pop(), pop(), push(9), push(1), pop(), push(7), push(6),
pop(), pop(), push(4), pop(), pop().
--------------------------------------------------------------------------
Solution:
--------------------------------------------------------------------------
A stack is a last-in, first-out (LIFO) structure:
1. Initial []
2. push(5) [5]
3. push(3) [5, 3]
4. pop() [5]
5. push(2) [5, 2]
6. push(8) [5, 2, 8]
7. pop() [5, 2]
8. pop() [5]
9. push(9) [5, 9]
10. push(1) [5, 9, 1]
11. pop() [5, 9]
12. push(7) [5, 9, 7]
13. push(6) [5, 9, 7, 6]
14. pop() [5, 9, 7]
15. pop() [5, 9]
16. push(4) [5, 9, 4]
17. pop() [5, 9]
18. pop() [5]
"""
values = [
[],
[5],
[5, 3],
[5],
[5, 2],
[5, 2, 8],
[5, 2],
[5],
[5, 9],
[5, 9, 1],
[5, 9],
[5, 9, 7],
[5, 9, 7, 6],
[5, 9, 7],
[5, 9],
[5, 9, 4],
[5, 9],
[5]
]
return values | d3acdcdd38cf86cf2d94a9c35dec9994a649ef4e | 702,027 |
import hashlib
def get_SHA1(variant_rec):
"""Calculate the SHA1 digest from the ref, study, contig, start, ref, and alt attributes of the variant"""
h = hashlib.sha1()
keys = ['seq', 'study', 'contig', 'start', 'ref', 'alt']
h.update('_'.join([str(variant_rec[key]) for key in keys]).encode())
return h.hexdigest().upper() | 45e1aca002dc2ae972ee0e61c11441c11714c793 | 702,028 |
def reverse_list(head):
"""Fantasic code!"""
new_head = None # ptr on previous item
while head:
head.next, head, new_head = new_head, head.next, head # look Ma, no temp vars!
return new_head | a9a09f30083549aeaeaa4750f66efaf869fce44d | 702,029 |
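Usage sketch with a minimal (hypothetical) singly linked node class:

class Node:
    def __init__(self, val, next=None):
        self.val, self.next = val, next

rev = reverse_list(Node(1, Node(2, Node(3))))
while rev:
    print(rev.val)  # 3, then 2, then 1
    rev = rev.next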
def mult_vector(vector, coeff):
""" multiplies a 2D vector by a coefficient """
return [coeff * element for element in vector] | 4f404b23ef4f11b162b352735275498811cc6821 | 702,030 |
def get_date_whereclause(date_colname, startdate, enddate):
"""This is to change the date format function to use on the actual queries
sqlite and mysql use different methodnames to do their date arithmetic"""
ret = " %s > '%s' AND %s < '%s' " % (date_colname,startdate.strftime('%Y-%m-%d'),
date_colname, enddate.strftime('%Y-%m-%d'))
return ret | f02d033f3f89eac5ac4d496d5995e87b6d9d0301 | 702,031 |
def newton_rhapson(f, df, x0, epsilon=1e-5):
    """Find a root of the function f with derivative df by applying
    Newton's method, starting from the point x0.
    """
    # The first point is the one received as a parameter
    prev_x = x0
    # Apply the first iteration
    x = x0 - f(x0) / df(x0)
    # Keep iterating until the required precision is reached.
    num_steps = 1
    while abs(x - prev_x) / abs(prev_x) > epsilon:
        x, prev_x = x - f(x) / df(x), x
        num_steps += 1
    return x, num_steps | 5cd4d3b201d5e0cfd441df494b4203b523f39171 | 702,032 |
def appendToFile(fileName: str, content: str):
"""Writes content to the given file."""
with open(fileName, "a") as f:
f.write(content)
return None | a9e4604fa9404f3c304a40e18ead42a70d99e956 | 702,033 |
import re
def _apply_regex(regex, full_version):
"""
Applies a regular expression to the given full_version and tries to capture
a group
:param regex: the regular expression to apply
:param full_version: the string that the regex will apply
:return: None if the regex doesn't match or the result of the group
"""
match_object = re.match(regex, full_version)
if match_object and len(match_object.groups()) == 1:
subversion = match_object.group(1)
else:
subversion = None
return subversion | 0a053fd716844f4ec1ad166f414e4d1b931434ec | 702,035 |
def indices(a, func):
"""
Get indices of elements in an array which satisfies func
>>> indices([1, 2, 3, 4], lambda x: x>2)
[2, 3]
>>> indices([1, 2, 3, 4], lambda x: x==2.5)
[]
>>> indices([1, 2, 3, 4], lambda x: x>1 and x<=3)
[1, 2]
>>> indices([1, 2, 3, 4], lambda x: x in [2, 4])
[1, 3]
>>> indices([1,2,3,1,2,3,1,2,3], lambda x: x > 2)
[2, 5, 8]
"""
return [i for (i, val) in enumerate(a) if func(val)] | 8c1855cfdbbc11f7b88b23971f03717fc78be27a | 702,036 |
def my_decorated_function(name, value): # ...check_value(fix_name(negate_value(my_decorated_function)))
"""my original function."""
print("name:", name, "value:", value)
return value | fec8ea3f49f4561bdf63245bf5cb9ec284352704 | 702,037 |
import platform
def get_architecture_string():
"""Return a string representing the operating system and the python
architecture on which this python installation is operating (which may be
different than the native processor architecture.."""
return '%s%s' % (platform.system().lower(),
platform.architecture()[0][0:2]) | 1af6d3b0a713dad26a372389d583f7f7e44679e3 | 702,038 |
import re
import html
def escape_text(txt):
    """
    Escape text, replacing leading spaces with non-breaking ones and newlines with a <br> tag
    """
    lines = []
    for line in txt.splitlines():
        lead_spaces = re.match(r'^\s+', line)
        if lead_spaces:  # Replace leading spaces with non-breaking ones
            lines.append(lead_spaces.end() * '&nbsp;' + html.escape(line[lead_spaces.end():], quote=False))
        else:
            lines.append(html.escape(line, quote=False))
    return '<br>'.join(lines) | 6c750fc9f0862a6b8a5739362918bb54ec73ea98 | 702,039 |
import json
def _clean_output_json(output_json: str) -> str:
"""Make JSON output deterministic and nicer to read."""
try:
output = json.loads(output_json)
except json.JSONDecodeError:
raise ValueError(
f"Instead of JSON, output was:\n--- output start ---\n{output_json}\n--- output end ---"
)
return json.dumps(output, indent=2, sort_keys=True) | 0952ed8f8cc34ca2c18aa3d09ca0c81607066332 | 702,040 |
async def get_default_playing(in_guild):
"""Search for a suitable waiting channel on new guild entry
Parameters
----------
in_guild :
Discord Guild to determine default channel for
"""
channels = in_guild.voice_channels
words = ["active","play","stream"]
chans = [c for c in channels if [d for d in words if d in c.name.lower()]] #Find list of channels matching words
chan = chans[0] if chans else channels[0]
return chan.id | ffd8418f1b3f5a32933a67459ab94d3960dd68d6 | 702,041 |
import socket
def check_connection(server, port):
""" Checks connection to server on given port """
try:
sock = socket.create_connection((server, port), timeout=5)
except socket.error:
return False
else:
sock.close()
return True | 7b0b7174e7351c87a907d94012e65898cc38a713 | 702,042 |
def empirical_cdf(values, v):
    """
    Returns the proportion of values in ``values`` <= ``v``.
    """
    count = 0.0
    for v0 in values:
        if v0 <= v:  # match the docstring: count values less than or equal to v
            count += 1
    return count / len(values) | 65de22130e87ede7dc637e4140f324bdad6dc31b | 702,043 |
def centerpoint(s):
"""
s is a 2-d array shape (pair of ints or floats)
returns a 2-tuple of floats
correct for Jinc, hex transform, 'ff' fringes to place peak in
central pixel (odd array)
pixel corner (even array)
"""
return (0.5*s[0] - 0.5, 0.5*s[1] - 0.5) | ab79301294f59b88e6005f175671d86fb6201534 | 702,044 |
def _calculate_compliance(results):
"""
Calculate compliance numbers given the results of audits
"""
success = len(results.get('Success', []))
failure = len(results.get('Failure', []))
control = len(results.get('Controlled', []))
total_audits = success + failure + control
if total_audits:
compliance = float(success + control) / total_audits
compliance = int(compliance * 100)
compliance = '{0}%'.format(compliance)
return compliance
return None | d0855cd88a0ec88a1b9de2c1ba0372a854f2a9c6 | 702,045 |
def good_turing(corpus, words):
    """
    Good-Turing smoothing: estimate probabilities for the words seen in
    ``corpus`` and distribute the leftover mass over the unseen ``words``.
    :param corpus: list of observed tokens
    :param words: list of unseen (out-of-vocabulary) words
    :return: dict mapping each word to its smoothed probability
    """
    for w in words:
        if w in corpus:
            raise ValueError('Unseen-word list is invalid: word already occurs in corpus!')
    r_dict = {}
    for w in corpus:
        if r_dict.get(w) is None:
            r_dict[w] = 1
        else:
            r_dict[w] += 1
    n_dict = {}
    for r in r_dict.values():
        if n_dict.get(r) is None:
            n_dict[r] = 1
        else:
            n_dict[r] += 1
    prob_of_words = n_dict[1] / len(words)
    total = prob_of_words
    temp_prob = {}
    for k, v in r_dict.items():  # was `for k, v in r_dict:`, which raises ValueError
        if v < max(r_dict.values()):
            r_star = (r_dict[k] + 1) * n_dict[r_dict[k] + 1] / n_dict[r_dict[k]]
            temp_prob[k] = r_star / len(corpus)
        else:
            temp_prob[k] = r_dict[k] / len(corpus)
        total += temp_prob[k]
    # normalize
    ret = {}
    for w in corpus:
        ret[w] = temp_prob[w] / total
    for w in words:
        ret[w] = prob_of_words / total
    return ret | af35ba996d0f0d2236fc8ef4f6f0466a5204520e | 702,046 |
def datatype_percent(times, series):
"""
returns series converted to datatype percent
ever value is calculated as percentage of max value in series
parameters:
series <tuple> of <float>
returns:
<tuple> of <float> percent between 0.0 and 1.0
"""
max_value = max(series)
try:
new_series = tuple((value/max_value for value in series))
return new_series
except ZeroDivisionError:
return (0.0,) * len(series) | 8973829f6ea3425351373097fa90b7d4762a880e | 702,047 |
def lame(E=None, v=None, u=None, K=None, Vp=None, Vs=None, rho=None):
"""
Compute the first Lame's parameter of a material given other moduli.
:param E: Young's modulus (combine with v, u, or K)
:param v: Poisson's ratio (combine with E, u, or K)
:param u: shear modulus (combine with E, v, or K)
:param K: Bulk modulus (combine with E, v, or u)
:param Vp: Compressional velocity (combine with Vs and rho)
:param Vs: Shear velocity (combine with Vp and rho)
:param rho: Density (combine with Vp and Vs)
"""
if E and v:
L = E*v/((1+v)*(1-2*v))
elif E and u:
L = u*(E - 2*u)/(3*u - E)
elif E and K:
L = 3*K*(3*K-E)/(9*K-E)
elif v and u:
L = 2*u*v/(1-2*v)
elif v and K:
L = 3*K*v/(1+v)
elif u and K:
L = (3*K-2*u)/3
elif Vp and Vs and rho:
L = rho*(Vp**2 - 2*Vs**2)
else:
L = None
return(L) | b8f1d52bac3130b69f75903091d00ce49ba553f8 | 702,048 |
import os
import codecs
def initialize_vocabulary(vocabulary_path):
"""
initialize vocabulary from file.
assume the vocabulary is stored one-item-per-line
"""
characters_class = 9999
if os.path.exists(vocabulary_path):
with codecs.open(vocabulary_path, 'r', encoding='utf-8') as voc_file:
rev_vocab = [line.strip() for line in voc_file]
vocab = {x: y for (y, x) in enumerate(rev_vocab)}
reserved_char_size = characters_class - len(rev_vocab)
if reserved_char_size < 0:
raise ValueError("Number of characters in vocabulary is equal or larger than config.characters_class")
for _ in range(reserved_char_size):
rev_vocab.append('')
# put space at the last position
vocab[' '] = len(rev_vocab)
rev_vocab.append(' ')
return vocab, rev_vocab
raise ValueError("Initializing vocabulary ends: %s" % vocabulary_path) | c63f57299c42914986a17935ca57ccd995500cfe | 702,049 |
from typing import Dict
from typing import Any
import requests
def get_local_status() -> Dict[str, Any]:
"""
Returns a running status if localhost:4040/api/v1/applications is reachable; otherwise returns an idle status.
:return: Dict[str, Any]
"""
idle_cluster_state = {
"url" : "spark://simulated-local-mode-cluster:7077",
"workers" : [ ],
"aliveworkers" : 0,
"cores" : 0,
"coresused" : 0,
"memory" : 0,
"memoryused" : 0,
"activeapps" : [ ],
"completedapps" : [ ],
"activedrivers" : [ ],
"completeddrivers" : [ ],
"status" : "ALIVE"
}
running_cluster_state = {
"url" : "spark://simulated-local-mode-cluster::7077",
"workers" : [ ],
"aliveworkers" : 0,
"cores" : 0,
"coresused" : 0,
"memory" : 0,
"memoryused" : 0,
"activeapps" : [ {
"id" : "app-0-0000",
"starttime" : 0,
"name" : "pyspark-shell",
"cores" : 0,
"user" : "spark-user",
"memoryperslave" : 0,
"submitdate" : "Thu Jan 13 17:47:31 GMT 2022",
"state" : "WAITING",
"duration" : 0
} ],
"completedapps" : [ ],
"activedrivers" : [ ],
"completeddrivers" : [ ],
"status" : "ALIVE"
}
spark_status_url = "http://localhost:4040/api/v1/applications"
try:
r = requests.get(url=spark_status_url)
if r.status_code != 200:
return idle_cluster_state
return running_cluster_state
except requests.exceptions.RequestException:  # narrowed from a bare except
return idle_cluster_state | 06f79d95e513cf91634345c96334f6af7f385fdf | 702,051 |
def total_points(min_x, max_x, points_per_mz):
"""
Calculate the number of points for the regular grid over the m/z range.
:param min_x: the lowest m/z value
:param max_x: the highest m/z value
:param points_per_mz: number of points per m/z unit
:return: total number of points
:rtype: int
"""
if min_x > max_x:
raise ValueError("min_x > max_x")
if min(min_x, max_x, points_per_mz) <= 0:
raise ValueError("all inputs must be greater than 0")
return int((max_x - min_x) * points_per_mz) + 1 | e0680e386559a9603b3b23b9627bd8648b23e65a | 702,052 |
from functools import reduce
def core_count():
""" count number of cores in the local machine """
with open("/proc/cpuinfo","r") as procinfo:
return reduce(lambda a, b: a + b.startswith("processor"), procinfo, 0) | a9adc9ed3f88d5a8dd0175ffb7508adc14f4717e | 702,053 |
def time_text_to_float(time_string):
"""Convert tramscript time from text to float format."""
hours, minutes, seconds = time_string.split(':')
seconds = int(hours) * 3600 + int(minutes) * 60 + float(seconds)
return seconds | 96d84804aad2cd094901e61373855604d4768894 | 702,054 |
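Usage sketch:

print(time_text_to_float("01:02:03.5"))  # 3723.5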
from typing import List
from typing import Dict
def incremental_build(new_content: str, lines: List, metadata: Dict) -> List:
"""Takes the original lines and updates with new_content.
The metadata holds information enough to remove the old unreleased and
where to place the new content
Args:
lines: The lines from the changelog
new_content: This should be placed somewhere in the lines
metadata: Information about the changelog
Returns:
Updated lines
"""
unreleased_start = metadata.get("unreleased_start")
unreleased_end = metadata.get("unreleased_end")
latest_version_position = metadata.get("latest_version_position")
skip = False
output_lines: List = []
for index, line in enumerate(lines):
if index == unreleased_start:
skip = True
elif index == unreleased_end:
skip = False
if (
latest_version_position is None
or isinstance(latest_version_position, int)
and isinstance(unreleased_end, int)
and latest_version_position > unreleased_end
):
continue
if skip:
continue
if (
isinstance(latest_version_position, int)
and index == latest_version_position
):
output_lines.append(new_content)
output_lines.append("\n")
output_lines.append(line)
if not isinstance(latest_version_position, int):
output_lines.append(new_content)
return output_lines | 7bb44732e278b4dd41ef4796b08b3e39548be4ef | 702,055 |
from typing import Tuple
from typing import List
from datetime import datetime
def read_tides(path: str) -> Tuple[List, List]:
    """Read a CO-OPS tide data CSV file and return
    a list of times and elevations."""
    with open(path) as f:  # close the file instead of leaking the handle
        data = [line.strip() for line in f]
    time = []
    elevation = []
    for line in data[1:]:
        line = line.replace('"', '').split(',')
        time.append(datetime.strptime(line[0] + line[1], '%Y/%m/%d%H:%M'))
        elevation.append(float(line[4]))
    return time, elevation | fcbe79a5ddedbaccd0bc0a9c317324881a774a78 | 702,057 |
import requests
def get_lovelive_info():
    """
    Fetch the quote of the day from the "sweet nothings" API
    :return: str, a cheesy pickup line
    """
    print('Fetching sweet nothings...')
    try:
        resp = requests.get('https://api.lovelive.tools/api/SweetNothings')
        if resp.status_code == 200:
            return resp.text
        print('Failed to fetch sweet nothings.')
    except requests.exceptions.RequestException as exception:
        print(exception)
    return None | daf73060c1caae261408b817a90ba81d25731f5a | 702,058 |
from typing import Any
import json
def read_json_file(filename: str) -> Any:
"""Read a json file
Parameters
----------
filename : str
The json file
Returns
-------
A json object
Example
-------
>>> import allyoucanuse as aycu
>>> content = aycu.read_json_file("tickets.json")
"""
with open(filename) as f:
return json.load(f) | e1afe013da96adfa5dd0e3ea998dce1323efe231 | 702,059 |
def combine_loss_components(critic_loss_val, actor_loss_val, entropy_val,
actor_loss_weight, entropy_bonus):
"""Combine the components in the combined AWR loss."""
return critic_loss_val + (actor_loss_val * actor_loss_weight) - (
entropy_val * entropy_bonus) | 1a060140aa3e08944d2e3f4cf1e4603c88e16921 | 702,061 |
import json
import logging
def country_filter() -> str:
""" find selected country to gather news from from config.json """
with open('config.json') as config_file:
data = json.load(config_file)
logging.info('Countries for news briefing located')
return data['news_briefing'][0]['country'] | 35cfee9c0239dfef7ad352f2c17c2d3a4d140eb9 | 702,062 |
def filter_in_out_by_column_values (column, values, data, in_out):
"""Include rows only for given values in specified column.
column - column name.
values - list of acceptable values.
in_out - 'in' keeps rows with matching values, anything else excludes them.
"""
if in_out == 'in':
data = data.loc[data[column].isin (values)]
else:
data = data.loc[~data[column].isin (values)]
return data | 191fa20edae7f67b245ad31658297d4a114f59fe | 702,063 |
def intersection(set_1, set_2):
"""realisation of two sets intersection(simple set generator inside)"""
return {i for i in set_1 if i in set_2} | d46a206e6c202343615c3b045a8885d8efd56607 | 702,064 |
def pointer_scope(func):
"""The FDB format has a lot of pointers to structures, so this decorator automatically reads the pointer, seeks to the pointer position, calls the function, and seeks back."""
def wrapper(self, *args, **kwargs):
pointer = kwargs.get("pointer")
if pointer is None:
pointer = self._read_int32()
else:
del kwargs["pointer"]
if pointer == -1:
return
current_pos = self.fdb.tell()
self.fdb.seek(pointer)
result = func(self, *args, **kwargs)
self.fdb.seek(current_pos)
return result
return wrapper | a83abfcd0cb9b641aec3125fb267cc6c6f134884 | 702,066 |
def waitfor(css_selector, text=None, classes=None):
"""
Decorator for specifying elements (selected by a CSS-style selector) to
explicitly wait for before taking a screenshot. If text is set, wait for the
element to contain that text before taking the screenshot. If classes is
present, wait until the element has all classes.
"""
def decorator(method):
if not isinstance(getattr(method, '__waitfors', None), list):
setattr(method, '__waitfors', [])
method.__waitfors.append({
'css_selector': css_selector,
'text': text,
'classes': classes
})
return method
return decorator | 13130146441f20cfd424e52e46f2f2e3a7e33d3f | 702,067 |
import argparse
def parse_arguments():
""" Parse arguments passed to the script """
parser = argparse.ArgumentParser(description='File uploader to Firebase Storage.')
parser.add_argument('config', help='Path of Firebase configuration file.')
parser.add_argument('--filetoken', dest='filetoken', default=None,
help='File containing the auth token. Required when Storage has restrictive rules')
parser.add_argument('remote_path', help='Storage path where files have to be uploaded.')
parser.add_argument('files', nargs='+', help='File or files to upload.')
return parser.parse_args() | 1ce3117c64667aa5d50a88bfb64d03bce8b14995 | 702,068 |
def get_requested_countries(countries, country_names):
"""
gets requested countries for visualization and info
:param countries: list of all available countries for research
:param country_names: list
:return: (list, str)
"""
print(country_names)
# gather the requested information
chosen_names = []
requested_countries = []
name = ''
while name != 'next':
name = input('What countries would you like to investigate?'
' ("all" to add all, type "next" to proceed) ')
while name not in country_names and name != 'next' and name != 'all':
name = input('What countries would you like to investigate?'
' ("all" to add all, type "next" to proceed) ')
chosen_names.append(name)
# add chosen countries
for name in chosen_names:
for country in countries:
if name == country.name:
requested_countries.append(country)
elif name == 'all':
requested_countries = countries
break
# ask what info would the user like to learn
commands = ('gdp', 'investment_inflows', 'investment_outflows',
'manufacturing')
print(commands)
info = input('What would you like to visualize? ')
while info not in commands:
info = input('What would you like to visualize? ')
return requested_countries, info | b96f3b001e5a421c333c366f3cdca656d97fe933 | 702,069 |
def retrieve_plain(monitor, object_string):
"""Retrieves the request object as-is (doesn't apply any modification). This
is valid only for objects which are single values (items from a tensor)
:param monitor: either a training or evaluation monitor
:param object_string: string to identify the object to retrieve
:returns: the data for the given monitor directly
"""
return getattr(monitor, object_string) | 7fc5042422ae70933691202718036703554cff3e | 702,070 |
def transpose(table):
"""
Returns a copy of table with rows and columns swapped
Example:
1 2 1 3 5
3 4 => 2 4 6
5 6
Parameter table: the table to transpose
Precondition: table is a rectangular 2d List of numbers
"""
# LIST COMPREHENSION
#return [[row[i] for row in table] for i in range(len(table[0]))]
# ACCUMULATOR PATTERN
# Find the size of the (non-ragged) table
numrows = len(table)
numcols = len(table[0]) # All rows have same no. cols
# Build the table
result = [] # Result accumulator
for m in range(numcols):
# Make a single row
row = [] # Single row accumulator
for n in range(numrows):
row.append(table[n][m])
#Add the result to the table
result.append(row)
return result | 2a393a9e3606022d945454da55fd68543e59476b | 702,071 |
def memsizeformat(size):
    """Returns memory size in human readable (rounded) form.
    Assumes ``size`` is given in kilobytes.
    """
    if size > 1048576:  # 1024**2
        return "{0} GB".format(size // 1048576)  # floor division keeps it rounded
    elif size > 1024:
        return "{0} MB".format(size // 1024)
    else:
        return "{0} KB".format(size) | 0cd812d83bd85b1e2690e0404a4eb833e2e9824f | 702,072 |
def flip_dataframe(df, new_colname='index'):
"""Flips table such that first row becomes columns
Args:
df (DataFrame): Data frame to be flipped.
new_colname (str): Name of new column. Defaults to 'index'.
Returns:
DataFrame: flipped data frame.
"""
colnames = [new_colname] + df.iloc[:, 0].tolist()
df = df.T.reset_index()
df.columns = colnames
df = df.iloc[1:, :]
return df | 3a7c733644e2c67398a511c9dea7fa80845bbecf | 702,073 |
def ca65_bytearray(s):
"""Convert a byteslike into ca65 constant byte statements"""
s = [' .byt ' + ','.join("%3d" % ch for ch in s[i:i + 16])
for i in range(0, len(s), 16)]
return '\n'.join(s) | 8bdc868cc659e6b99f01449c6bf41884c0635c14 | 702,074 |
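Usage sketch: four bytes become a single ca65 .byt statement:

print(ca65_bytearray(b"\x01\x02\xff\x00"))  # .byt   1,  2,255,  0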
import os
def convert_to_pdf(input_path):
"""
Use external tools to convert the powerpoint file to a pdf.
Returns:
path of converted file
"""
path, extension = os.path.splitext(input_path)
if extension not in ['.ppt', '.pptx']:
raise ValueError("{0} not a valid powerpoint extension".format(extension))
renamed_path = path + ".pdf"
# unoconv doesn't support writing to a file, so pipe to a file
print("Powerpoint file detected, converting...")
os.system("unoconv -f pdf --stdout \"{0}\" > \"{1}\"".format(input_path, renamed_path))
print("converted from {0} to {1}".format(input_path, renamed_path))
return renamed_path | aef761b559d32d6f75790cf41d07f80fb7933308 | 702,075 |
def is_response_paginated(response_data):
"""Checks if the response data dict has expected paginated results keys
Returns True if it finds all the paginated keys, False otherwise
"""
try:
keys = list(response_data.keys())
except AttributeError:
# If we can't get keys, we're certainly not paginated
return False
return set(keys) == set([u'count', u'next', u'previous', u'results']) | 521c28c1d6e29e5785b3bcbd5d2604210b3a3874 | 702,076 |
def create_metadata_dict():
"""A Python dictionary will be created to hold the relevant metadata.
:return: dictionary with keys for the relevant metadata
:rtype: dict
"""
metadata = {'Directory': None,
'Filename': None,
'Extension': None,
'ImageType': None,
'AcqDate': None,
'SizeX': None,
'SizeY': None,
'SizeZ': 1,
'SizeC': 1,
'SizeT': 1,
'SizeS': 1,
'SizeB': 1,
'SizeM': 1,
'isRGB': False,
'isMosaic': False,
'czi_size': None,
'czi_dims': None,
'czi_dims_shape': None,
'ObjNA': [],
'ObjMag': [],
'ObjID': [],
'ObjName': [],
'ObjImmersion': [],
'TubelensMag': [],
'ObjNominalMag': [],
'XScale': None,
'YScale': None,
'ZScale': None,
'XScaleUnit': None,
'YScaleUnit': None,
'ZScaleUnit': None,
'DetectorModel': [],
'DetectorName': [],
'DetectorID': [],
'DetectorType': [],
'InstrumentID': [],
'Channels': [],
'ChannelNames': [],
'ChannelColors': [],
'ImageIDs': [],
'bbox_all_scenes': None,
'bbox_all_mosaic_scenes': None,
'bbox_all_mosaic_tiles': None,
'bbox_all_tiles': None
}
return metadata | 1cb7a42e21df76c13eed3fe905030bd55c7f4af9 | 702,077 |
def compute_power(rawvolts, rawamps):
"""
Compute the power. Looks trivial, but I'm gonna implement
smoothing later.
"""
power = rawvolts * 1.58
power_low = rawvolts * 1.51
power_high = rawvolts * 1.648
return power, power_low, power_high | 9202e456d4655de12ec5608011e80647cf62f7ab | 702,078 |
import argparse
def arguments():
""" Builds a generic argparse
:return: parser
"""
workdir = '/opt/nuvlabox/installation'
parser = argparse.ArgumentParser(description='NuvlaBox Agent')
parser.add_argument('--nuvlabox-installation-trigger-json', dest='nb_trigger_content', default=None, metavar='JSON',
help="JSON content, as a string, of the NuvlaBox installation USB trigger file")
parser.add_argument('--nuvlabox-installation-dir', dest='nb_workdir', default=workdir, metavar='PATH',
help="Location on the filesystem where to keep the NuvlaBox Engine installation files")
return parser.parse_args() | 70c4a81131c5e19a43271905e4f7b5623715cfd4 | 702,079 |
import csv
def read_aa_losses(filename):
"""
Read AA losses from data file. (assume fixed structure...)
"""
aa_losses = {}
with open(filename, 'r') as f:
reader = csv.reader(f, delimiter=',')
next(reader) # skip headers
for line in reader:
if len(line) == 0:
continue
aa_id = line[1]
aa_mono = float(line[4])
aa_avg = float(line[5])
aa_losses[aa_id.lower()] = (aa_mono, aa_avg)
return aa_losses | b1ba8349d01d43112ef67436fa2bb09d3bed768c | 702,080 |
def add(M1, M2):
"""
Returns a matrix Q, where Q[i][j] = M1[i][j] + M2[i][j].
M2 is replaced by Q.
"""
m = len(M1)
n = len(M1[0])
for p in range(m):
for q in range(n):
M2[p][q] = M2[p][q] + M1[p][q]
return M2 | ac86e9109f6287cde062392992bf64dbf49614f5 | 702,081 |
import sys
def azure_pipelines_broken():
"""Azure pipelines has multiple major versions of python that are broken.
The fixes for this will take a month since the regression was
first noted, since they have to rebuild their images and roll them
out, which apparently takes a while (which makes me wonder what
they do with major CVEs), and the workarounds are deleting their
python installations, downloading a new installation, and using
that... wtf.
Note: this is the third major regression in pipelines in less than
30 days that has broken CI for days. So I choose not to mince words.
As an alternative while we look for alternatives, this checks for tests
known to fail with broken python installations and skips them.
"""
return (sys.version_info.major, sys.version_info.minor) in ((2, 7), (3, 7)) | 1550835cf548c97fd46f6faf5f00444a46f22649 | 702,082 |