content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
---|---|---|
import os
def get_all_source_files(arr=None, prefix="."):
    """Recursively collect C/C++ source files (.cpp, .h, .c) under ``prefix``.

    Hidden entries and vendored third-party trees (pcre, mongodb-, mozjs,
    sqlite, ...) are skipped.  Symlinked directories are only followed when
    they live inside a ``modules`` directory.

    Args:
        arr: Optional list to append results to; a new list is created when None.
        prefix: Directory (or single file) to scan.

    Returns:
        list: The collected file paths (the same object as ``arr`` when given).
    """
    if arr is None:
        arr = []
    if not os.path.isdir(prefix):
        # Not a directory: treat the prefix itself as a file.
        arr.append(prefix)
        return arr

    # Fix: this helper was previously re-defined on every loop iteration;
    # define it once per call instead.
    def is_followable_dir(parent, full):
        """Return True if 'full' is a directory we should descend into."""
        if not os.path.isdir(full):
            return False
        if not os.path.islink(full):
            return True
        # Follow softlinks only in the modules directory (e.g.: enterprise).
        return os.path.split(parent)[1] == "modules"

    for fx in os.listdir(prefix):
        # pylint: disable=too-many-boolean-expressions
        if (fx.startswith(".") or fx.startswith("pcre-") or fx.startswith("32bit")
                or fx.startswith("mongodb-") or fx.startswith("debian")
                or fx.startswith("mongo-cxx-driver") or fx.startswith("sqlite")
                or "gotools" in fx or fx.find("mozjs") != -1):
            continue
        # pylint: enable=too-many-boolean-expressions
        full = prefix + "/" + fx
        if is_followable_dir(prefix, full):
            get_all_source_files(arr, full)
        elif full.endswith(".cpp") or full.endswith(".h") or full.endswith(".c"):
            full = full.replace("//", "/")
            arr.append(full)
    return arr
def count_all_questions_yes(group_input: str) -> int:
    """Count the questions that every member of a group answered with yes."""
    answers = (set(member.strip()) for member in group_input.split("\n"))
    common = set.intersection(*answers)
    return len(common)
def get_cell_numbers(contained):
    """Retrieve non-overlapping cell numbers from the output of `get_overlapping`.

    None may appear at the ends of the output, indicating that the corresponding
    target cells are not overlapping with any source cells; these should be
    ignored when regridding.  A -1 contribution marks a depth-1 overlap between
    adjacent operations.

    Returns:
        cell_numbers (list): Number of cells per source-dimension entry
            (None for non-overlapping entries).
        overlap (bool): True when at least one location has a depth-1
            overlap between adjacent operations.
    """
    cell_numbers = []
    overlap = False
    previous = None
    for elements in contained:
        cell_number = None
        # A shared boundary cell between consecutive entries signals overlap.
        if previous is not None and previous and elements and elements[0] == previous[-1]:
            overlap = True
            cell_number = -1
        if elements:
            base = 0 if cell_number is None else cell_number
            cell_number = base + (elements[-1] - elements[0] + 1)
        cell_numbers.append(cell_number)
        previous = elements
    return cell_numbers, overlap
def get_mos(da, da_peak_times):
    """
    Compute middle of season (mos) vegetation values per-pixel (time not
    available).

    The middle of season is the mean veg_index value of the timeseries taken
    at 80% to the left and right of the peak of season (pos) per-pixel.

    Parameters
    ----------
    da: xarray DataArray
        Two- or multi-dimensional array of veg_index and time values.
    da_peak_times: xarray DataArray
        x/y array (no time); each pixel holds the day-of-year computed at
        peak of season (pos).

    Returns
    -------
    da_mos_values : xarray DataArray
        x/y array (no time); each pixel is the veg_index value at mos.
    """
    # notify user
    print('Beginning calculation of middle of season (mos) values (times not possible).')
    print('> Calculating middle of season (mos) values.')
    # Split the series at the per-pixel peak-of-season day.
    left = da.where(da['time.dayofyear'] <= da_peak_times)
    right = da.where(da['time.dayofyear'] >= da_peak_times)
    # Keep only the upper 80% of values on each side of the peak.
    left_upper = left.where(left >= (left.max('time') * 0.8))
    right_upper = right.where(right >= (right.max('time') * 0.8))
    # Average the two sides to obtain the mos value per-pixel.
    da_mos_values = (left_upper.mean('time') + right_upper.mean('time')) / 2
    # Convert type and rename the output variable.
    da_mos_values = da_mos_values.astype('float32').rename('mos_values')
    # notify user
    print('> Success!\n')
    return da_mos_values
import re
def tag_validate(tags, case_ids, plan_id):
    """Validate that a case's tags carry a well-formed, known 'ID_<case_id>' tag.

    Args:
        tags: Tag collection attached to the case (stringified for matching).
        case_ids: Iterable of valid case ids for the plan.
        plan_id: Plan the case is expected to belong to.

    Returns:
        None when the tag is valid, otherwise an error-message string.
    """
    error_msg = None
    # Raw string fixes the invalid '\d' escape in the original pattern.
    tag_id = re.findall(r'ID_\d+|id_\d+', str(tags))
    if not tag_id:
        # Clean message: the original line-continuation embedded stray
        # indentation whitespace into the user-facing text.
        error_msg = ("Wrong Tag Format:"
                     "\nPls add tag 'ID_case_id' to this case.")
    elif int(tag_id[0][3:]) not in case_ids:
        error_msg = ("Wrong Tag Format:"
                     "\ncase %s don't belong to plan %s" % (tag_id[0][3:], plan_id))
    return error_msg
def get_clean_urls(raw_urls):
    """Normalise URLs and drop duplicates.

    Known problems handled so far:
    https vs http
    https://www.test.de vs https://test.de
    https://www.test.de/something vs https://www.test.de/something#something
    https://www.test.de/something.html vs https://www.test.de/something.html?something
    """
    normalised = set()
    for raw in raw_urls:
        # Drop anchors (#) and query strings (?).
        url = raw.split('#', 1)[0].split('?', 1)[0]
        # Prefer https over http.
        url = url.replace('http:', 'https:')
        # Ensure a www. prefix when none appears near the start.
        if 'www' not in url[:12]:
            url = url.replace('https://', 'https://www.')
        normalised.add(url)
    return list(normalised)
from importlib.util import find_spec
def _pytest_has_xdist() -> bool:
"""
Check if the pytest-xdist plugin is installed, providing parallel tests
"""
# Check xdist exists without importing, otherwise pytests emits warnings
return find_spec("xdist") is not None | 77cc6d04d21f76b35b183fc4e9ebc2bf6824b744 | 702,426 |
def makedist(dist_type, *pars, **kwards):
    """
    Create a distribution object from scipy continuous distributions.
    See https://docs.scipy.org/doc/scipy/reference/stats.html.

    Parameters
    ----------
    dist_type: str -> Name of the scipy.stats distribution (e.g. 'norm')
    *pars and **kwards: Statistical parameters and their values

    Return
    ------
    (dist, dist_type): the (frozen) distribution object and the name used

    Raises
    ------
    AttributeError: if ``dist_type`` is not a scipy.stats distribution.
    """
    import scipy.stats as sst
    # getattr replaces eval() on a caller-supplied string, which could
    # execute arbitrary code.
    dist_cls = getattr(sst, dist_type)
    dist = dist_cls(*pars, **kwards)
    return (dist, dist_type)
import importlib
def build_model(model_name, weights_file=None, **kwargs):
    """Build the desired model, optionally loading pre-trained weights.

    Args:
        model_name: Sibling module name whose ``build_model`` is called.
        weights_file: Optional path of weights to load by layer name.
        **kwargs: Forwarded to the model module's ``build_model``.

    Returns:
        The constructed model (weights loaded when possible).
    """
    module = importlib.import_module("." + model_name, __name__)
    model = module.build_model(**kwargs)
    if not weights_file:
        return model
    try:
        model.load_weights(weights_file, by_name=True)
        print("Loaded existing model:", weights_file)
    except Exception as e:  # pylint: disable=broad-except
        # Best-effort load: a failure leaves the freshly built model intact.
        print("Error loading model:", e)
    return model
def solve(_n, tree):
    """
    Count trees ('#') encountered traversing a 2D grid along slope (3, 1).

    The grid wraps horizontally: the column index is taken modulo the row
    width.

    :param _n: The number of rows in the 2D grid.
    :param tree: The 2D grid as a list of rows ('.' empty, '#' tree).
    :return: The number of trees encountered along the slope (3, 1).
    """
    width = len(tree[0])
    count = 0
    col = 0
    for row in range(1, _n):
        # Move 3 right (wrapping) for every 1 down.
        col = (col + 3) % width
        if tree[row][col] == "#":
            count += 1
    return count
def _getNodeType(node): # {{{
""" return NodeType as String """
if node.nodeType == node.ELEMENT_NODE : return "ELEMENT_NODE"
elif node.nodeType == node.ATTRIBUTE_NODE : return "ATTRIBUTE_NODE"
elif node.nodeType == node.TEXT_NODE : return "TEXT_NODE"
elif node.nodeType == node.CDATA_SECTION_NODE : return "CDATA_SECTION_NODE"
elif node.nodeType == node.ENTITY_NODE : return "ENTITY_NODE"
elif node.nodeType == node.PROCESSING_INSTRUCTION_NODE : return "PROCESSING_INSTRUCTION_NODE"
elif node.nodeType == node.COMMENT_NODE : return "COMMENT_NODE"
elif node.nodeType == node.DOCUMENT_NODE : return "DOCUMENT_NODE"
elif node.nodeType == node.DOCUMENT_TYPE_NODE : return "DOCUMENT_TYPE_NODE"
elif node.nodeType == node.NOTATION_NODE : return "NOTATION_NODE"
return "UKNOWN NODE" | 70bda4da9f991f17602ba2992cb6c11f19141eac | 702,431 |
import math
def get_margin(home_team, away_team, home_team_score, away_team_score):
    """
    Get the multiplier for the margin of victory.

    The log-scaled margin is damped by the elo gap between the teams and
    never drops below 1.
    """
    goal_diff = home_team_score - away_team_score
    elo_gap = (home_team.elo - away_team.elo) / 100
    multiplier = math.log(abs(goal_diff - 0.85 * elo_gap) + math.e - 1)
    return max(1, multiplier)
def create_scales(options):
    """
    Creates the scales for imfil.m.  Custom scales take precedence;
    otherwise the scales are inverse powers of the step size.
    dscal = create_scales(options)
    C. T. Kelley, September 15, 2008
    This code comes with no guarantee or warranty of any kind."""
    custom = options.custom_scales
    if len(custom) > 0:
        return custom
    step = options.scale_step
    start = options.scale_start
    depth = options.scale_depth
    # Do some error checking. Warn or complain as needed.
    if start > depth:
        raise ValueError('imfil_create_scales: error in scales, start > depth')
    # scale reduces with power of the step
    return [step ** -x for x in range(start, depth + 1)]
def _sbtype_has_field(sbtype, field_name):
"""Recursive helper to have has_field search up the inheritance hierarchy."""
for f in sbtype.fields:
if f.name == field_name:
return True
for b in sbtype.bases:
if _sbtype_has_field(b.type, field_name):
return True
for b in sbtype.vbases:
if _sbtype_has_field(b.type, field_name):
return True
return False | 8f2ec709214c9080b96acf267c66f1671fa47da3 | 702,434 |
def t_s(n, costheta):
    """Fresnel transmittance for two interfaces.
    **Arguments:**
    - **n**: iterable with two entries for (n_0, n_1)
    - **costheta:** iterable with two entries for (cos(theta_0), cos(theta_1))
    """
    numerator = 2 * n[0] * costheta[0]
    denominator = n[0] * costheta[0] + n[1] * costheta[1]
    return numerator / denominator
import json
import os
import collections
def get_tf_config():
    """Parse the TF_CONFIG environment variable into a named tuple.

    Returns:
        A ``TfConfig(task_type, task_id, cluster)`` named tuple, or None
        when TF_CONFIG is unset or empty.
    """
    raw = os.getenv('TF_CONFIG', '{}')
    tf_config = json.loads(raw)
    if not tf_config:
        return None
    TfConfig = collections.namedtuple(
        "TfConfig", ["task_type", "task_id", "cluster"])
    task = tf_config['task']
    return TfConfig(task['type'], int(task['index']), tf_config['cluster'])
def bin_bucket_sort(arr):
    """
    Binary bucket sort / 2-Radix (LSD) sort for non-negative integers.
    Time: O(NLog2N)
    Space: O(N)
    input: 1D-list array
    output: 1D-list sorted array
    """
    result = list(arr)
    if not result:
        return result
    # Fix: the previous version stopped as soon as one bucket came out
    # empty, which returns UNSORTED output for inputs such as [4, 2]
    # (both land in the zeros bucket on the first bit).  Instead, keep
    # making stable passes until the highest set bit has been processed.
    highest = max(result)
    bit = 1
    while bit <= highest:
        zeros, ones = [], []
        for value in result:
            (ones if value & bit else zeros).append(value)
        # Stable concatenation preserves order within each bucket.
        result = zeros + ones
        bit <<= 1
    return result
def tonum(s):
    """Convert a decimal, hexadecimal ('x') or binary ('b') string to a number.

    Numeric inputs pass through unchanged; None and unparsable strings
    yield 0.
    """
    if type(s) in (int, float):
        return s
    if s is None:
        return 0
    # Infer the base from marker characters in the string.
    base = 10
    if isinstance(s, str):
        if "x" in s:
            base = 16
        elif "b" in s:
            base = 2
    try:
        return int(s, base)
    except ValueError:
        return 0
import sys
def handle_response(response, display=False):
    """
    Dispatch a scripting response or event to its type-specific handler.

    Errors on completion events are always printed; other messages are
    emitted only when ``display`` is True.  Unknown response types are
    silently ignored.
    """
    def on_response(resp, show=False):
        if show:
            sys.stderr.write(
                u'Scripting request submitted with request id: {}\n'.format(
                    resp.operation_id))

    def on_plan(resp, show=False):
        if show:
            sys.stderr.write(
                u'Scripting request: {} plan: {} database objects\n'.format(
                    resp.operation_id, resp.count))

    def on_progress(resp, show=False):
        if show:
            sys.stderr.write(
                u'Scripting progress: Status: {} Progress: {} out of {} objects scripted\n'.format(
                    resp.status, resp.completed_count, resp.total_count))

    def on_complete(resp, show=False):
        if resp.has_error:
            # Always display error messages.
            sys.stdout.write(
                u'Scripting request: {} encountered error: {}\n'.format(
                    resp.operation_id, resp.error_message))
            sys.stdout.write(u'Error details: {}\n'.format(resp.error_details))
        elif show:
            sys.stderr.write(
                u'Scripting request: {} completed\n'.format(resp.operation_id))

    handlers = {
        u'ScriptResponse': on_response,
        u'ScriptPlanNotificationEvent': on_plan,
        u'ScriptProgressNotificationEvent': on_progress,
        u'ScriptCompleteEvent': on_complete,
    }
    handler = handlers.get(type(response).__name__)
    if handler is not None:
        return handler(response, display)
import socket
def hostname():
    """
    Check the system's hostname.
    :rtype: str
    :return: system hostname (short form, without the domain part)
    """
    fqdn = socket.gethostname()
    short_name, _, _ = fqdn.partition('.')
    return short_name
import csv
def import_data(file):
    """
    Imports instruction data from a .csv file-like object.
    :param file: file
        File object (or iterable of lines) to be read by the CSV reader
    :return: list
        A list of dictionaries, one per row, keyed by the headers below
    """
    headers = [
        'entity', 'buy_sell', 'agreed_fx', 'currency', 'instr_date', 'settle_date', 'units', 'ppu',
    ]
    # DictReader maps each row onto the fixed header schema.
    reader = csv.DictReader(file, headers)
    return list(reader)
import re
def filter_data(data_frame):
    """
    Normalise string values in the last two columns of a raw data frame:
    the second-to-last column keeps only its digits (as int), the last
    column is parsed as a hexadecimal int.
    :param data_frame: raw data in data frame type
    :return: data frame
    """
    digits_col = data_frame.columns[-2]
    hex_col = data_frame.columns[-1]
    data_frame[digits_col] = data_frame[digits_col].apply(
        lambda text: int("".join(re.findall(r'\d+', text))))
    data_frame[hex_col] = data_frame[hex_col].apply(lambda text: int(text, 16))
    return data_frame
from typing import Tuple
from typing import List
from typing import Any
def _extract_bracket_params(meta_type: str) -> Tuple[str, List[Any]]:
"""
Gets parameters from the string representation of the type
Args:
meta_type (str): The string name of the metadata type
Returns:
Tuple[str, List[Any]]: A tuple, first arg is a string of the type
name only and then the second value is a list of values (if any)
inside the brackets of the meta_type. e.g. "int64" returns ("int64", [])
and "decimal128(1,2)" returns ("decimal128", [1, 2])
"""
is_decimal_type = meta_type.startswith("decimal128")
is_binary_type = meta_type.startswith("binary")
if "(" in meta_type:
attr_name, value_str = meta_type.split("(", 1)
value_str = value_str.split(")")[0]
values = value_str.split(",")
if not any([bool(v) for v in values]):
values = []
# cast input to int for specific types
if (is_decimal_type or is_binary_type) and values:
values = [int(v.strip()) for v in values]
else:
attr_name = meta_type
values = []
return attr_name, values | 38fc3872c18bb788a54d50b09a36cf3f5925550e | 702,444 |
import hashlib
def calc_local_file_md5_sum(path):
    """
    Calculate and return the MD5 checksum of a local file.

    The file is read in fixed-size chunks so arbitrarily large files do
    not have to fit in memory at once (the previous version read the
    whole file with a single ``read()``).

    Arguments:
        path(str): The path to the file
    Returns:
        str: The MD5 checksum (hex digest)
    """
    digest = hashlib.md5()
    with open(path, "rb") as file_to_hash:
        # 1 MiB chunks; iter() stops at the b"" sentinel on EOF.
        for chunk in iter(lambda: file_to_hash.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()
def emit_compare(field_name, value, session, model):
    """Emit a comparison operation comparing the value of ``field_name`` on ``model`` to ``value``.

    Args:
        field_name: Attribute name to read from ``model``.
        value: Right-hand side of the comparison.
        session: Unused here; kept for interface compatibility with other emitters.
        model: Object (e.g. an ORM model class) owning the attribute.

    Returns:
        The result of ``model.<field_name> == value`` — a bool, or an ORM
        expression when ``model`` overloads ``==``.
    """
    # Renamed from 'property', which shadowed the builtin of the same name.
    field = getattr(model, field_name)
    return field == value
def unsigned32(i):
    """Cast a signed 32 bit integer to its unsigned representation."""
    mask = (1 << 32) - 1
    return i & mask
import tkinter as tk
from tkinter import filedialog
import os
def ask_path(folder_flag=True, multiple_files_flag=False):
    """Open a tkinter dialog for choosing a folder if folder_flag=True
    or file(s) otherwise. For multiple files set multiple_files_flag
    to True.
    """
    # This method is almost never used, so tkinter is imported at module load
    # for it only.
    root = tk.Tk()
    root.withdraw()
    start_dir = os.getcwd()
    if folder_flag:
        # Folder selection
        path = filedialog.askdirectory(parent=root, initialdir=start_dir, title='Please select directory')
    elif multiple_files_flag:
        # Several data files
        path = filedialog.askopenfilenames(parent=root, initialdir=start_dir, title='Please select data files')
        path = root.tk.splitlist(path)
    else:
        # One data file
        path = filedialog.askopenfilename(parent=root, initialdir=start_dir, title='Please select data file')
    root.destroy()
    return path
def has_and_not_none(obj, name):
    """
    Returns True iff obj has attribute name and obj.name is not None
    """
    # getattr with a None default collapses "missing" and "None" into the
    # same (False) outcome, matching hasattr + is-not-None.
    return getattr(obj, name, None) is not None
def _qname_matches(tag, namespace, qname):
"""Logic determines if a QName matches the desired local tag and namespace.
This is used in XmlElement.get_elements and XmlElement.get_attributes to
find matches in the element's members (among all expected-and-unexpected
elements-and-attributes).
Args:
expected_tag: string
expected_namespace: string
qname: string in the form '{xml_namespace}localtag' or 'tag' if there is
no namespace.
Returns:
boolean True if the member's tag and namespace fit the expected tag and
namespace.
"""
# If there is no expected namespace or tag, then everything will match.
if qname is None:
member_tag = None
member_namespace = None
else:
if qname.startswith('{'):
member_namespace = qname[1:qname.index('}')]
member_tag = qname[qname.index('}') + 1:]
else:
member_namespace = None
member_tag = qname
return ((tag is None and namespace is None)
# If there is a tag, but no namespace, see if the local tag
# matches.
or (namespace is None and member_tag == tag)
# There was no tag, but there was a namespace so see if the namespaces
# match.
or (tag is None and member_namespace == namespace)
# There was no tag, and the desired elements have no namespace, so check
# to see that the member's namespace is None.
or (tag is None and namespace == ''
and member_namespace is None)
# The tag and the namespace both match.
or (tag == member_tag
and namespace == member_namespace)
# The tag matches, and the expected namespace is the empty namespace,
# check to make sure the member's namespace is None.
or (tag == member_tag and namespace == ''
and member_namespace is None)) | 66aa9272fd6e4a6e281d39f03dd63acabad0bbe7 | 702,451 |
from typing import Optional
from typing import Union
import subprocess
def run_local(command: str, str_output: Optional[bool] = False) -> Union[int, str]:
    """Runs a command locally; returns its exit code, or its stdout (with
    newlines replaced by spaces) when ``str_output`` is True."""
    print(command)
    if not str_output:
        return subprocess.call(command, shell=True)
    proc = subprocess.Popen(
        command,
        shell=True,
        stdout=subprocess.PIPE,
        universal_newlines=True
    )
    stdout, _ = proc.communicate()
    return stdout.replace("\n", " ")
def get_chombo_box_extent(box, space_dim):
    """
    Parse box extents from Chombo HDF5 files into low and high limits.
    Parameters
    ----------
    box : List
        Chombo HDF5 format box limits,
        e.g. [x_lo, y_lo, x_hi, y_hi] = [0,0,1,1]
    space_dim : int
        Number of spatial dimensions
    Returns
    -------
    lo, hi : List
        Low and high limits, [x_lo, y_lo, ...], [x_hi, y_hi, ...]
        e.g [0,0], [1,1]
    """
    # The first space_dim entries are the low corner, the next the high.
    lo = list(box[:space_dim])
    hi = list(box[space_dim:2 * space_dim])
    return lo, hi
def get_raw_pdb_filename_from_interim_filename(interim_filename, raw_pdb_dir):
    """Get raw pdb filename from interim filename.

    Keeps the interim file's parent directory and the first two dot-separated
    parts of its basename, relocated under ``raw_pdb_dir``.
    """
    path_parts = interim_filename.split('/')
    parent_dir = path_parts[-2]
    name_parts = path_parts[-1].split(".")
    return "{}/{}/{}.{}".format(raw_pdb_dir, parent_dir, name_parts[0], name_parts[1])
def array_offset(x):
    """Get offset of array data from base data in bytes."""
    # Arrays that own their data (no base) start at offset zero.
    if x.base is None:
        return 0
    own_start = x.__array_interface__["data"][0]
    base_start = x.base.__array_interface__["data"][0]
    return own_start - base_start
import os
import re
def get_flags(source):
    """Gets flags from a source file.
    Args:
        source (str): Path to the source file (could be any extension).
    Returns:
        list[dict[str, _]]: List of maps with keys "type", "name", "default", and "descr" for the
        respective fields corresponding to the flag.
    """
    flags = []
    _, ext = os.path.splitext(source)
    # Python sources declare flags via "flags.DEFINE_*"; other languages
    # (e.g. C++ gflags) use the bare "DEFINE_*" macro.
    if ext == ".py":
        delim = "flags.DEFINE_"
    else:
        delim = "DEFINE_"
    # Split on commas that sit OUTSIDE double-quoted strings (an even number
    # of quotes must follow the comma to the end of the text).
    split_comma_regex = r",(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)"
    # Map C++ gflags type names to their Python gflags equivalents.
    cppflag_to_pygflag = {"int32": "integer", "double": "float", "bool": "boolean"}
    with open(source) as f:
        lines = f.readlines()
        # Collapse the whole file onto one line so multi-line flag definitions
        # stay together; the first chunk (text before any flag) is discarded.
        flag_lines = "".join(lines).replace("\n", "").split(delim)[1:]
        for flag_line in flag_lines:
            # Each chunk looks like 'type(name, default, descr, ...)...'.
            flag_type, flag_def = flag_line.split("(", 1)
            flag_contents = re.compile(split_comma_regex).split(flag_def)
            # A well-formed flag carries at least name, default, description.
            NUM_FLAG_FIELDS = 3
            if len(flag_contents) < NUM_FLAG_FIELDS:
                continue
            flag = {}
            flag["type"] = flag_type
            flag["name"] = flag_contents[0].strip().replace('"', "")
            flag["default"] = flag_contents[1].strip()
            # Description runs up to the last closing parenthesis.
            flag["descr"] = flag_contents[2].rsplit(")", 1)[0].strip()
            if flag["type"] in cppflag_to_pygflag:
                flag["type"] = cppflag_to_pygflag[flag["type"]]
            # Coerce defaults to native Python values where the type is known;
            # on parse failure the raw string is kept as-is.
            if flag["type"] == "boolean":
                # NOTE(review): only the exact lowercase literal "false" maps
                # to False; a Python-style "False" default would become True —
                # confirm that is intended.
                flag["default"] = False if (flag["default"] == "false") else True
            elif flag["type"] == "integer":
                try:
                    flag["default"] = int(flag["default"])
                except Exception:
                    pass
            elif flag["type"] == "float":
                try:
                    flag["default"] = float(flag["default"])
                except Exception:
                    pass
            flags.append(flag)
    return flags
def update_global_variable():
    """
    Modify a module-level variable via the ``global`` keyword.
    :return: the new value of ``count`` (always 10)
    """
    global count
    count = 10
    return count
def get_non_lib(functions):
    """
    Get all non-library functions
    @param functions: List of db_DataTypes.dbFunction objects
    @return: a subset list of db_DataTypes.dbFunction objects that are not library functions.
    """
    non_lib = []
    for func in functions:
        if not func.is_lib_func:
            non_lib.append(func)
    return non_lib
def _check_duplicates(data, name):
"""Checks if `data` has duplicates.
Parameters
----------
data : pd.core.series.Series
name : str
Name of the column (extracted from geopandas.GeoDataFrame) to check duplicates.
Returns
-------
bool : True if no duplicates in data.
"""
if data.duplicated().any():
duplicates = data[data.duplicated(keep=False)]
raise ValueError(f"{name} cannot contain duplicate values, found {duplicates}")
return True | 429ce8d092b3a39fc44eeca91d593db22fe7364d | 702,460 |
def _has_textframe(obj):
""" Check if placeholder has TextFrame """
return hasattr(obj, 'TextFrame') and hasattr(obj.TextFrame, 'TextRange') | 087e6df38e55d99637e8e7ea998257f323d6d141 | 702,461 |
def get_mapping(combinable_list):
    """Determine the mapping from acceptance_id to the register id that can be used
    to index into an array.
    """
    result = {}
    array_index = 0
    pending = combinable_list
    while pending:
        # Always combine the largest remaining set first.
        largest_idx = max(range(len(pending)), key=lambda i: len(pending[i]))
        combination = pending.pop(largest_idx)
        for acceptance_id in combination:
            if acceptance_id not in result:
                result[acceptance_id] = array_index
        # Since combinations only contain post_context_id's not mentioned
        # before, and empty combinations are dropped below, new entries were
        # made for the current index — so the next round needs a fresh one.
        array_index += 1
        # Drop combinations that have become empty.
        pending = [combo for combo in pending if len(combo)]
    return result
def clean_chamber_input(chamber):
    """ Turns ambiguous chamber information into tuple (int, str) with chamber id and chamber name """
    if type(chamber) == str:
        # Accept numeric strings and chamber abbreviations alike.
        aliases = {'1': 1, '2': 2, 'GA': 1, 'SC': 2}
        chamber = aliases.get(chamber, chamber)
    if chamber == 1:
        chamber_name = 'GA'
    elif chamber == 2:
        chamber_name = 'SC'
    else:
        chamber_name = ''
    return chamber, chamber_name
def search_for_letters(phrase: str = 'life, the universe, and everything', letters: str = 'forty two') -> set:
    """Display any 'letters' found in a 'phrase'."""
    return {ch for ch in letters if ch in phrase}
import yaml
def loadyaml(file, default=None):
    """Utility function to load from a yaml file.

    Args:
        file: Path of the yaml file to read.
        default: Value returned when the file does not exist.  Defaults to a
            fresh empty dict per call — the previous ``default={}`` shared one
            mutable dict across all callers, leaking state between them.

    Returns:
        The parsed yaml content, or ``default`` when the file was not found.
    """
    if default is None:
        default = {}
    try:
        with open(file, "r", encoding="utf-8") as f:
            t = yaml.load(f, Loader=yaml.FullLoader)
    except FileNotFoundError:
        t = default
    return t
from pathlib import Path
import logging
import yaml
def params_from_yaml(args):
    """Extract the parameters for preparation from a yaml file and return a dict.

    Args:
        args: Namespace-like object whose ``config`` attribute holds the
            path of the yaml configuration file.

    Returns:
        dict: Parsed parameters; ``verbose`` defaults to True when absent.

    Raises:
        FileNotFoundError: If the config file does not exist.
        Exception: Re-raised yaml parse/read errors.
    """
    # Check the path exists.  An explicit exception replaces the previous
    # `assert`, which is silently stripped when Python runs with -O.
    config_file_path = Path(args.config)
    if not config_file_path.exists():
        logging.error(f"Could not find config file at {args.config}")
        raise FileNotFoundError(f"Could not find config file at {args.config}")
    # Load the data from the config file
    try:
        with open(config_file_path, "r") as f:
            params = yaml.safe_load(f)
    except Exception:
        logging.error(
            f"Could not extract parameters from yaml file at {config_file_path}"
        )
        raise
    # Default to verbose output when the config does not specify it.
    if "verbose" not in params.keys():
        params["verbose"] = True
    return params
from typing import List
from typing import Union
def list_or_first(x: List[str]) -> Union[List[str], str]:
    """
    Return ``x`` unchanged when it holds more than one element, otherwise
    its first (and only) element.
    """
    if len(x) > 1:
        return x
    return x[0]
import re
def parse_version(version_str):
    """'10.6' => [10, 6]"""
    return list(map(int, re.findall(r'(\d+)', version_str)))
import requests
def display_selected_patient_info(MRI):
    """ Get a patient's latest information and ECG trace image
    As a very important functionality of the server, the function
    sends a 'GET' request to the server, get a string that includes
    all patient's latest info and ECG image b64 string. Then it
    splits each content by the comma, and returns them separately.
    Args:
        int/str: A integer of medical record number, or a numeric
        string that indicates a medical record number
    Returns:
        str, str, str, str,str, str: 5 separate strings that contain
        patient's information about the medical record number, patient
        name, latest heart rate, latest ECG image(b64 string) and when
        the latest ECG record was updated
    """
    # Query the local monitoring server for the patient's latest record.
    r = requests.get("http://127.0.0.1:5000//api/display/info/" + str(MRI))
    # Strip the surrounding bracket characters and split on commas.
    # NOTE(review): this breaks if any field itself contains a comma —
    # confirm the server's response format guarantees comma-free fields.
    info_list = r.text[1:-2].split(",")
    # SECURITY: eval() executes arbitrary code contained in the HTTP
    # response.  Safe only if the server is fully trusted; consider
    # ast.literal_eval for plain literals instead.
    number = eval(info_list[0])
    name = eval(info_list[1])
    latest_hr = eval(info_list[2])
    latest_ECG_image = eval(info_list[3])
    uploaded_time = eval(info_list[4])
    return number, name, latest_hr, latest_ECG_image, uploaded_time
def contains_common_item_2(arr1, arr2):
    """
    Build a dictionary keyed by the items of the first array, then scan the
    second array for any key present in it.
    """
    seen = {}
    for item in arr1:
        seen[item] = True
    return any(seen.get(candidate, False) for candidate in arr2)
def complex_function(z):
    """
    The complex function to plot. *You can write any function here*.
    :param z: a numpy 2d array of complex numbers.
    """
    numerator = z - 1
    denominator = z + 1
    return numerator / denominator
def stop(job_id, event, action, resource, _count):
    """App stop event type"""
    job_name = f'{resource}:event={event}:action={action}'
    func_kwargs = {
        'job_id': job_id,
        'app_name': resource,
    }
    return job_name, func_kwargs
def vector_add(v, w):
    """adds corresponding elements"""
    return [a + b for a, b in zip(v, w)]
def nexthop_is_local(next_hop):
    """
    Check if next-hop points to the local interface.
    Will be True for Connected and Local route strings on Cisco devices.

    Returns:
        bool: True when ``next_hop`` names a local interface type; False
        otherwise (the previous version implicitly returned None).
    """
    interface_types = (
        'Eth', 'Fast', 'Gig', 'Ten', 'Port',
        'Serial', 'Vlan', 'Tunn', 'Loop', 'Null'
    )
    # str.startswith accepts a tuple of prefixes, replacing the manual loop
    # (which also shadowed the builtin `type`).
    return next_hop.startswith(interface_types)
def logistic_expval(mu, tau):
    """
    Expected value of logistic distribution.

    The mean of a logistic distribution equals its location parameter
    ``mu``; the scale-related parameter ``tau`` does not affect it.
    """
    return mu
def is_(var):
    """intuitive handling of variable truth value also for `numpy` arrays.
    Return `True` for any non-empty container, otherwise the truth value of the
    scalar `var`.
    Caveat of the most unintuitive case: [0] evaluates to True, like [0, 0].
    >>> import numpy as np
    >>> from cma.utilities.utils import is_
    >>> is_({}) or is_(()) or is_(0) or is_(None) or is_(np.array(0))
    False
    >>> is_({0:0}) and is_((0,)) and is_(np.array([0]))
    True
    """
    try:
        length = len(var)  # containers: ('', (), [], {}, np.array([]))
    except TypeError:
        # scalars and None/False/0 fall back to ordinary truthiness
        return bool(var)
    return bool(length)
import re
import tarfile
import io
def _check_open_tarball(testcase, response):
"""
Check http-response headers and open tar ball from content.
"""
testcase.assertTrue(re.search(r'attachment;\s*filename="[^"]*.tar.gz"',
response['Content-Disposition']))
testcase.assertEqual(response['Content-Type'], 'application/gzip')
testcase.assertEqual(int(response['Content-Length']), len(response.content))
tar = tarfile.open(mode='r:gz', fileobj=io.BytesIO(response.content))
return tar | ffa0aa59f102fffb3c11664ee5f486f424b6acd8 | 702,479 |
def GetKDPPacketHeaderInt(request=0, is_reply=False, seq=0, length=0, key=0):
    """ create a 64 bit number that could be saved as pkt_hdr_t
    params:
        request:int - 7 bit kdp_req_t request type
        is_reply:bool - False => request, True => reply
        seq: int - 8 sequence number within session
        length: int - 16 bit length of entire pkt including hdr
        key: int - session key
    returns:
        int - 64 bit number to be saved in memory
    """
    # Pack fields: bits 0-6 request, bit 7 reply flag, bits 8-15 seq,
    # bits 16-31 length, bits 32+ session key.
    header = request
    if is_reply:
        header |= 1 << 7
    header |= seq << 8
    header |= length << 16
    header |= key << 32
    return header
import json
def save_items_list(drive_service, file_location):
    """
    save cloud items list in a json file
    :param file_location: location where file needs to be saved
    :param drive_service: service object for drive
    :return: True if list successfully saved else False
    """
    listing = drive_service.files().list().execute()
    # Only persist when the listing is complete.
    if listing['incompleteSearch']:
        return False
    serialized = json.dumps(listing['items'])
    with open(file_location, 'w+') as json_file:
        json_file.write(serialized)
    return True
import json
def craft_unknown_asset_message(
    username, emoji, channel, launch_time, cloud_provider, vps_name, tags
):
    """Build a JSON-encoded Slack message (Block Kit layout) warning about a
    cloud asset that is not tracked in Ghostwriter.
    """
    alert_text = "An *untracked* cloud asset is running without being attached to a project. If this asset should be ignored, add the `gw_ignore` tag."
    # One field per asset attribute, in display order.
    field_specs = [
        ("Cloud Provider", cloud_provider),
        ("Instance Name", vps_name),
        ("Launch Date", launch_time),
        ("Tags", tags),
    ]
    fields = [
        {"type": "mrkdwn", "text": "*{}:*\n{}".format(label, value)}
        for label, value in field_specs
    ]
    message = {
        "username": username,
        "icon_emoji": emoji,
        "channel": channel,
        "text": alert_text,
        "blocks": [
            {"type": "section", "text": {"type": "mrkdwn", "text": alert_text}},
            {"type": "section", "fields": fields},
        ],
    }
    return json.dumps(message)
def tilted_L1(u, quantile=0.5):
    """
    Tilted (asymmetric) L1 penalty:
    tilted_L1(u; quant) = quant * [u]_+ + (1 - quant) * [u]_
    At quantile=0.5 this reduces to 0.5*|u|.
    """
    return 0.5 * abs(u) + (quantile - 0.5) * u
def getId(collection):
    """ Get the ImageCollection id.
    **CLIENT SIDE** (forces a getInfo round trip on an empty limit)
    :type collection: ee.ImageCollection
    :return: the collection's id
    :rtype: str
    """
    info = collection.limit(0).getInfo()
    return info['id']
def bytes_to_human(size, digits=2, binary=True):
    """Convert a byte value to the largest (> 1.0) human readable size.
    Args:
        size (int): byte size value to be converted.
        digits (int, optional): number of digits used to round the converted
            value. Defaults to 2.
        binary (bool, optional): convert to binary (True) or decimal (False)
            units. Defaults to True.
    Returns:
        str: value translated to a human readable size.
    """
    base = 1024 if binary else 1000
    suffixes = ["B", "KB", "MB", "GB", "TB", "PB", "EB"]
    # Non-numeric inputs are rendered as zero bytes.
    amount = size if isinstance(size, (int, float)) else 0
    suffix = suffixes[0]
    remaining = suffixes[1:]
    power = 0
    while amount > base and remaining:
        power += 1
        amount = float(size) / (base ** power)
        suffix = remaining.pop(0)
    if base == 1024 and len(suffix) > 1:
        # Binary units get the IEC "i" infix, e.g. KB -> KiB.
        suffix = "{}i{}".format(suffix[0], suffix[1])
    return "".join([str(round(amount, digits)), suffix])
import requests
import toml
def get_stellar_toml(domain, allow_http=False):
    """Retrieve the stellar.toml file from a given domain.
    Tries ``<domain>``, ``www.<domain>`` and ``stellar.<domain>`` in order and
    returns the first successfully fetched file, parsed via :func:`toml.loads`;
    None when no candidate responds with HTTP 200.
    :param str domain: The domain the .toml file is hosted at.
    :param bool allow_http: Specifies whether the request should go over plain
        HTTP vs HTTPS. Note it is recommend that you *always* use HTTPS.
    :return: The stellar.toml file as an object, or None.
    """
    scheme = 'http://' if allow_http else 'https://'
    well_known_path = '/.well-known/stellar.toml'
    for prefix in ('', 'www.', 'stellar.'):
        response = requests.get(scheme + prefix + domain + well_known_path)
        if response.status_code == 200:
            return toml.loads(response.text)
    return None
def _validate_method(method, where):
"""Helper for get_params()"""
if method is None:
return None
if method not in ['signal', 'thread']:
raise ValueError('Invalid method %s from %s' % (method, where))
return method | dbf7194f50a43dc3fd944f8923ddfec5aed3bbe7 | 702,488 |
import pathlib
def parent(path: str) -> str:
    """Return the parent directory of *path* as a string.
    e.g.
    parent("/home/rafy/testing_make_dir/test1") -> '/home/rafy/testing_make_dir'
    Args:
        path (str): path to get its parent
    Returns:
        str: parent path.
    """
    as_path = pathlib.Path(path)
    return str(as_path.parent)
def _getText(nodelist):
""" returns collected and stripped text of textnodes among nodes in nodelist """
rc = ""
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc = rc + node.data
return rc.strip() | 37548ebf34f0f26cc4166e95621ee1ec0f3a3f71 | 702,490 |
import re
def strip_html(text):
    """Remove HTML tags from *text*. See http://stackoverflow.com/a/9662362"""
    tag_pattern = re.compile('<[^<]+?>')
    return tag_pattern.sub('', text)
def from_json(json_data, key):
    """Extract values from JSON data.
    :arg dict json_data: The JSON data
    :arg str key: Key to get data for.
    :Returns: The value of `key` from `json_data`, or None if `json_data`
        does not contain `key`.
    """
    # dict.get already implements the membership-check-then-index pattern
    # (single lookup instead of two).
    return json_data.get(key)
def weights_dask(weights):
    """
    Return the chunked (lazily evaluated, dask-backed) version of *weights*.
    NOTE(review): original docstring said "Weighting array by cosine of the
    latitude" — presumably describing the expected input; confirm with callers.
    """
    chunked = weights.chunk()
    return chunked
import os
import binascii
def get_secure_random_string(size):
    """
    Return a string of ``size`` random bytes. Returned string is suitable for
    cryptographic use.
    :param size: Size of the generated string.
    :type size: ``int``
    :return: Random string.
    :rtype: ``str``
    """
    raw = os.urandom(size)
    hex_text = binascii.hexlify(raw).decode('utf-8')
    # hexlify doubles the length, so truncate back to `size` characters.
    return hex_text[:size]
def project(a):
    """ De-homogenize vector: divide the leading coordinates by the last one
    (assumes array-like supporting slicing and elementwise division,
    e.g. a numpy array). """
    scale = float(a[-1])
    return a[:-1] / scale
import os
def ospathjoin(*args, **kwargs):
    """
    Simple ``os.path.join`` for a specific platform.
    @param      args        list of paths
    @param      kwargs      additional parameters, among them,
                            *platform* (win32 or ...)
    @return                 path
    """
    platform = kwargs.get('platform', None)
    if platform is None:
        joined = os.path.join(*args)
    elif platform.startswith("win"):
        joined = "\\".join(args)
    else:
        joined = "/".join(args)
    if joined == "/$PYINT":
        raise RuntimeError(  # pragma: no cover
            "Impossible values {} - {}.".format(args, kwargs))
    return joined
def screen_aos(mol, active_atoms, den_mat_a, ovlp, trunc_lambda):
    """Screen AOs for truncation.
    Shells sitting on active atoms are always kept; any other shell is kept
    only if at least one of its AOs has a diagonal density*overlap product
    above *trunc_lambda*. Returns (active_aos, include) where include is a
    per-shell boolean list.
    """
    include = [False] * mol.nbas
    active_aos = []
    for shell in range(mol.nbas):
        shell_aos = list(range(mol.ao_loc[shell], mol.ao_loc[shell + 1]))
        if mol.bas_atom(shell) not in active_atoms:
            significant = any(
                den_mat_a[ao, ao] * ovlp[ao, ao] > trunc_lambda
                for ao in shell_aos
            )
            if not significant:
                continue
        include[shell] = True
        active_aos += shell_aos
    return active_aos, include
def spell_check(text, spell):
    """Return the best compound spelling correction of *text* from the given
    spell checker (max edit distance 2); return *text* unchanged when no
    suggestion is found."""
    suggestions = spell.lookup_compound(text, 2)
    return suggestions[0].term if suggestions else text
from numpy.linalg import inv
def n_clinic_to_unit_cube(x_array,box):
    """
    Map "real" box coordinates onto the unit n-cube.
    ---Inputs---
    x_array : {2 mode numpy array}
        array defining the "real" coordinate(s) at which to calculate values of
        basis functions
        shape (n_points, d)
        dimension of first mode gives number of points
        dimension of second mode gives number of spatial dimensions (d)
    box : {2 mode numpy array}
        square array of shape (d, d) defining the region in which points x_array live,
        each row is a vector
    ---Outputs---
    u_array : {numpy array}
        coordinates transformed into unit n-cube, shape (n_points, d)
        (None is returned, after printing an error, for malformed `box`)
    """
    if (len(box.shape) != 2):
        print('ERROR: box must be 2 mode numpy array')
        return
    elif (box.shape[0] != box.shape[1]):
        print('ERROR: box array must be square')
        return
    # given box vectors in columns of V, let L be the mapping from unit n-cube to box
    # L I = V --> L = V
    # then L^{-1} maps from "real" box coordinates to the unit n-cube
    # L^{-1} V = I
    # BUG FIX: this must be the matrix inverse (numpy.linalg.inv);
    # the previous `from operator import inv` is bitwise NOT, which is wrong
    # for integer arrays and raises TypeError for float arrays.
    V = box.T  # box variable has cell vectors stored in rows, above formulation has them as columns of V
    L = V
    L_inv = inv(L)
    # transform every coordinate into the unit n-cube
    u_array_transposed = L_inv@(x_array.T)
    u_array = u_array_transposed.T  # shape (n_points, n_dim)
    return u_array
def dummies(data):
    """
    Manually encode 'Property_Area' as integer labels in place
    (Rural->0, Semiurban->1, Urban->2). sklearn label encoder was breaking
    the model shape, hence the manual mapping.
    """
    area_codes = {'Rural': 0, 'Semiurban': 1, 'Urban': 2}
    for label, code in area_codes.items():
        data['Property_Area'] = data['Property_Area'].replace(label, code, regex=True)
    return data
def assert_all_loaded(pairs, raise_=True):
    """
    Check that every SleepStudy object in 'pairs' has its 'loaded' property
    set to True. If raise_ is True, a NotImplementedError is raised when one
    or more objects are not loaded; otherwise the boolean result is returned.
    Temp. until queue functionality implemented
    """
    n_loaded = sum(1 for p in pairs if p.loaded)
    if n_loaded == len(pairs):
        return True
    if raise_:
        raise NotImplementedError("BatchSequence currently requires all"
                                  " samples to be loaded")
    return False
import os
def expand_params(params_to_env):
    """
    Given a dictionary like:

    {
        "AwsAccessKeyId": "AWS_ACCESS_KEY_ID",
        "AwsSecretAccessKey": "AWS_SECRET_ACCESS_KEY",
        "KeyNukerOrg": "KEYNUKER_ORG",
    }

    convert to a CLI parameter string like:

    --param KeyNukerOrg default --param AwsAccessKeyId ***** ...

    where each value is read from the named environment variable.
    Array/dict-valued parameters (GithubOrgs, GithubUsers, TargetAwsAccounts,
    InitiatingAwsAccountAssumeRole) are appended single-quoted, and WebAction
    is rendered as ``--web true``.

    Raises Exception when a required environment variable is unset (unless the
    parameter is in the allowed-empty list, in which case it is skipped).
    """
    # Parameters that need special handling after the main loop: arrays/dicts
    # are passed single-quoted so the shell treats them as one token.
    special_params = ("GithubOrgs", "GithubUsers", "TargetAwsAccounts",
                      "InitiatingAwsAccountAssumeRole")
    # Some parameters can be empty -- eg, if the corresponding env variable
    # doesn't exist, just ignore it and don't add the param
    allowed_empty = ["GithubApiUrl"]
    result_list = []
    # BUG FIX: .items() instead of the Python-2-only .iteritems(), which
    # raises AttributeError on Python 3 (and .items() also works on Python 2).
    for param_name, env_var_name in params_to_env.items():
        if param_name in special_params or param_name == "WebAction":
            # Handled below; WebAction uses "--web true" rather than
            # "--param name value".
            continue
        env_var_val = os.environ.get(env_var_name)
        if env_var_val is None:
            if param_name in allowed_empty:
                continue  # Skip this param
            raise Exception("You must set the {} environment variable".format(env_var_name))
        result_list.append("--param")
        result_list.append(param_name)
        result_list.append('{}'.format(env_var_val))
    result = " ".join(result_list)
    for param_name in special_params:
        if param_name in params_to_env:
            env_var_val = os.environ.get(params_to_env[param_name])
            result += " --param {} ".format(param_name)
            result += "'{}'".format(env_var_val)
    if "WebAction" in params_to_env:
        result += " --web true"
    return result
def remote_shortname(socket):
    """
    Obtains remote hostname of the socket and cuts off the domain part
    of its FQDN (everything after the first '.').
    """
    fqdn = socket.gethostname()
    shortname, _, _ = fqdn.partition('.')
    return shortname
def formatParagraphLine(text, width):
    """
    Greedily wrap the words of *text* into rows of *width* characters,
    padding each emitted row with trailing spaces to exactly *width*
    (a single word longer than *width* is emitted unpadded on its own row).
    :return: array of rows
    """
    rows = []
    current = ''
    for word in text.split():
        if len(current) + len(word) + 1 > width:
            if current:
                # Current row is full: flush it and start a new one.
                rows.append(current.ljust(width))
                current = word
            else:
                # Word cannot share a row with anything: emit it alone.
                rows.append(word.ljust(width))
                current = ''
        else:
            current = current + ' ' + word if current else word
    if current:
        rows.append(current.ljust(width))
    return rows
def encodewrap(s):
    """Escape backslashes and newlines (as ``\\\\`` and ``\\n``) so the text
    fits on a single line; carriage returns are dropped. Falsy input yields
    an empty string."""
    if not s:
        return u""
    escaped = []
    for ch in s:
        if ch == '\\':
            escaped.append("\\\\")
        elif ch == '\n':
            escaped.append("\\n")
        elif ch != '\r':
            escaped.append(ch)
    return "".join(escaped)
import urllib3
import json
def get_public_urls():
    """Get a list of available external tunnel URLs by querying the local
    ngrok API (http://localhost:4040/api/tunnels).

    Returns an empty list when ngrok is not running or returns malformed data.
    """
    urls = []
    try:
        http = urllib3.PoolManager()
        req = http.request('GET', 'http://localhost:4040/api/tunnels')
        data = str(req.data, 'utf-8')
        obj = json.loads(data)
        for tunnel in obj['tunnels']:
            urls.append(tunnel['public_url'])
    # BUG FIX: a bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    # catch Exception so those still propagate.
    except Exception:
        # Ngrok not running (or unexpected response shape).
        pass
    return urls
def is_fragment(href):
    """Return True if href is a fragment (starts with '#') else False.

    Empty/falsy href is never a fragment; this replaces the original
    try/except IndexError with an equivalent short-circuit check.
    """
    return bool(href) and href[0] == '#'
import os
def is_dir(path):
    """Return True when *path* refers to an existing directory."""
    return bool(os.path.isdir(path))
def myocardialmass(myocardvol):
    """
    Convert myocardial volume to mass using the specific gravity of heart
    muscle (1.05 g/ml).
    """
    SPECIFIC_GRAVITY = 1.05  # g/ml, heart muscle
    return myocardvol * SPECIFIC_GRAVITY
async def challenge(websocket, user):
    """Challenge *user* to a gen8 Metronome Battle by sending the
    ``/challenge`` command over the websocket.
    """
    command = f'|/challenge {user}, gen8metronomebattle'
    return await websocket.send(command)
def create_character_ngrams(text_list, length):
    """ Create character ngrams of the specified length from pre-tokenized
    text; tokens equal to "user_mention" (case-insensitive) are skipped and
    all ngrams are lowercased and de-duplicated.
    Args:
        text_list (list): Pre-tokenized text tokens to process.
        length (int): Length of ngrams to create.
    http://stackoverflow.com/questions/18658106/quick-implementation-of-character-n-grams-using-python
    """
    ngrams = set()
    for token in text_list:
        lowered = token.lower()
        if lowered == "user_mention":
            continue
        ngrams.update(lowered[i:i + length]
                      for i in range(len(lowered) - length + 1))
    return list(ngrams)
def air2vacMortonIAU(wl_air):
    """Take an input air wavelength in Angstroms and return the vacuum
    wavelength.
    Formula taken from
    https://www.astro.uu.se/valdwiki/Air-to-vacuum%20conversion
    """
    s_squared = (1e4 / wl_air) ** 2
    refractive_index = (1 + 0.00008336624212083
                        + (0.02408926869968 / (130.1065924522 - s_squared))
                        + (0.0001599740894897 / (38.92568793293 - s_squared)))
    return wl_air * refractive_index
import os
def verify_path(file_name, folder):
    """Ensure *folder* exists under the current working directory and return
    the full path of *file_name* inside it.
    :param file_name: Name of file.
    :param folder: Name of folder
    """
    target_dir = os.path.join(os.getcwd(), folder)
    if not os.path.isdir(target_dir):
        os.makedirs(target_dir)
    return os.path.join(target_dir, file_name)
def get_max_node(candidates, gr_values, column_prefix=""):
    """Given a set of candidate nodes, return the one with the
    highest Gain Ratio.
    Args:
        candidates (list): List of candidate nodes.
        gr_values (dict): Dictionary with column names as keys and
            the corresponding Gain Ratio values as values.
        column_prefix (str): Prefix of the columns generated by the generator
            (e.g. "new_link_type_"). Defaults to "".
    Returns:
        str: Name of the node with the highest Gain Ratio in the candidate
        set (a sole candidate is returned even when its Gain Ratio is 0).
    """
    max_gr = 0
    max_gr_node = None
    for node in candidates:
        try:
            gr = gr_values[column_prefix + node]
        # BUG FIX: the original bare `except:` swallowed every error;
        # only a missing prefixed key should fall back to the bare name.
        except KeyError:
            gr = gr_values[node]
        if gr > max_gr or (gr == 0 and len(candidates) == 1):
            max_gr = gr
            max_gr_node = node
    return max_gr_node
def tablefy(data, column=None, gutter=1, width=79):
    """
    Convert a list of strings into being a table, displaying in a left-to-right
    and top-to-bottom pattern. This does not sort the values.
    :param data: list of strings
    :param column: width of column, if None, detected from the longest entry
    :param gutter: width of gutter
    :param width: width of entire table to fill
    :returns: newline separated string ("" for empty input)
    """
    if not data:
        return ""
    if column is None:
        column = max(len(s) for s in data)
    per_line = max(width // column, 1)
    separator = " " * gutter
    lines = []
    row = []
    for entry in data:
        row.append(entry.ljust(column))
        if len(row) == per_line:
            lines.append(separator.join(row))
            row = []
    if row:
        lines.append(separator.join(row))
    return "\n".join(lines)
import functools
def once(func):
    """
    Decorate func so it's only ever called the first time.
    This decorator can ensure that an expensive or non-idempotent function
    will not be expensive on subsequent calls and is idempotent.
    >>> func = once(lambda a: a+3)
    >>> func(3)
    6
    >>> func(9)
    6
    >>> func('12')
    6
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Cache the first result on the wrapped function itself; later calls
        # (whatever their arguments) return the cached value.
        if not hasattr(func, 'always_returns'):
            func.always_returns = func(*args, **kwargs)
        return func.always_returns
    return wrapper
def _element_basis(string: str):
"""
Parse element and basis from string
Args: str
Returns: element, basis
"""
cut_list = string.split(".")
element = cut_list[0]
basis = " ".join(cut_list[1:])
return element, basis | c6be1c2a05832bb3bbc4577015b541136c724a08 | 702,520 |
def proportion_linear(time_right: float,
                      time_between: float,
                      time_step: float) -> tuple:
    """
    Linear interpolation weights between two time borders.
    :param time_right: right border in time
    :param time_between: time between borders
    :param time_step: step in time between borders
    :return: (alpha, beta) with alpha + beta == 1

    Typical usage example:

    time_between = 1.98
    time_step = time_2 - time_1
    alpha, beta = proportion_linear(time_2, time_between, time_step)
    assert alpha + beta == 1
    assert alpha > beta
    """
    beta = (time_between - time_right) / time_step
    alpha = 1 - beta
    return alpha, beta
import requests
def getusercid(username):
"""
getusercid(string) -> string
tries to retrieve the CID of an avatar for a name to cid lookup
"""
getuser = 'http://www.imvu.com/catalog/web_av_pic.php?av=%s' % (username)
r = requests.get(getuser)
link = r.url
cid = link[link.index('avatars/')+len('avatars/'):link.index('_')]
return cid | 3f296e82191290646bef5e0c6e209f5d47b273f7 | 702,524 |
def mr2rho(w, t, p):
    """rho = mr2rho(w,t,p)
    w in g/kg
    t in K
    p in mb
    This was provided by Holger Linne' from Max Planck Institute.
    Dave Turner
    Pacific Northwest National Laboratory
    Currently at the University of Wisconsin-Madison
    [email protected]
    """
    return w * (p * 0.3477) / t
def get_block_time():
    """
    Return the block time.
    Currently hardcoded to 0.25.
    """
    BLOCK_TIME = 0.25
    return BLOCK_TIME
def satellite(isochrone, kernel, stellar_mass, distance_modulus, **kwargs):
    """
    Wrap the isochrone and kernel simulate functions: photometry comes from
    the isochrone, one sky position per simulated star from the kernel.
    """
    band1, band2 = isochrone.simulate(stellar_mass, distance_modulus)
    lon, lat = kernel.simulate(len(band1))
    return band1, band2, lon, lat
def check_negation(text, NEGATION_MAP):
    """
    Utility function to check negation of an emotion: True when any
    (stripped) negation word occurs as a substring of *text*.
    :param text: text chunk with the emotion term
    :param NEGATION_MAP: iterable of negation words
    :return: boolean value for negation
    """
    return any(neg_word.strip() in text for neg_word in NEGATION_MAP)
def latex_to_unicode(string):
    """Returns a unicode representation from latex strings used by pyFAI.
    .. note:: The latex string could be removed from the pyFAI core.
    :param str string: A latex string to convert
    :rtype: str
    """
    # Ordered replacement table (order matters: e.g. "^{-2}" before "^.").
    replacements = [
        ("$", u""),
        ("^{-2}", u"⁻²"),
        ("^{-1}", u"⁻¹"),
        ("^.", u"⋅"),
        ("2\\theta", u"2θ"),
        ("^{o}", u"°"),
        ("\\AA", u"Å"),
        ("log10", u"log₁₀"),
        ("^{*2}", u"d*²"),
    ]
    for latex, unicode_char in replacements:
        string = string.replace(latex, unicode_char)
    return string
def _tree_to_json(clf, features, labels, node_index=0):
"""Structure of rules in a fit decision tree classifier
Parameters
----------
clf : DecisionTreeClassifier
A tree that has already been fit.
features, labels : lists of str
The names of the features and labels, respectively.
Returns
----------
Decision tree json.
"""
node = {}
if clf.tree_.children_left[node_index] == -1: # indicates leaf, leaf noode
num_examples = clf.tree_.n_node_samples[node_index]
value = clf.tree_.value[node_index, 0].tolist()
distribution = [round(i/num_examples, 4) for i in value]
node['value'] = {'type': 'PROBABILITY',
'distribution': distribution,
'num_examples': f"{num_examples},{format(distribution[-1], '.2%')}"
}
else: # split node
feature = features[clf.tree_.feature[node_index]]
threshold = clf.tree_.threshold[node_index]
num_examples = clf.tree_.n_node_samples[node_index]
value = clf.tree_.value[node_index, 0].tolist()
distribution = [round(i/num_examples, 4) for i in value]
node['value'] = {'type': 'PROBABILITY',
'distribution': distribution,
'num_examples': f"{num_examples},{format(distribution[-1], '.2%')}"
}
node['condition'] = {'type': 'NUMERICAL_IS_HIGHER_THAN',
'attribute': feature,
'threshold': round(threshold, 4)
}
left_index = clf.tree_.children_left[node_index]
right_index = clf.tree_.children_right[node_index]
node['children'] = [_tree_to_json(clf, features, labels, right_index),
_tree_to_json(clf, features, labels, left_index)]
return node | 02689b1d0ccd6b5fb39def7e69a07a87770afd5d | 702,531 |
def find_bean_by_name(jsn, nme):
    """
    Extract the first bean with the given name from a jmx metrics json
    object; None when the 'beans' key is absent or no bean matches.
    """
    if 'beans' not in jsn:
        return None
    for bean in jsn['beans']:
        if bean['name'] == nme:
            return bean
    return None
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.