content (string, lengths 22-815k) | id (int64, 0-4.91M) |
---|---|
def MaybeGetHexShaOfLastExportedCommit(
    repo: git.Repo, head_ref: str = "HEAD") -> typing.Optional[str]:
  """Get the SHA1 of the most recently exported commit.
Args:
repo: The repo to iterate over.
head_ref: The starting point for iteration, e.g. the commit closest to
head.
Returns:
    The hex SHA1 of the last exported commit, else None.
"""
export_re = re.compile(r'\n\[Exported from ([a-fA-F0-9]{40})\]')
try:
for commit in repo.iter_commits(head_ref):
if '\n[Exported from ' in commit.message:
match = export_re.search(commit.message)
assert match
return match.group(1)
except git.GitCommandError:
# Raise if no HEAD, i.e. no commits.
pass
return None | 2,900 |
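A minimal usage sketch for MaybeGetHexShaOfLastExportedCommit, assuming GitPython is installed; the repository path is illustrative:

import git

repo = git.Repo('.')  # open the working directory as a git repository (illustrative path)
last_sha = MaybeGetHexShaOfLastExportedCommit(repo)
if last_sha:
    print('Last exported commit:', last_sha)
else:
    print('No exported commit found.')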
def Quit():
"""
Closes the client
"""
pass | 2,901 |
def get_inchi(ID):
"""This function accept UNIQUE-ID and return InChI string of a certain compound"""
inchi = df_cpd['INCHI'][ID]
return inchi | 2,902 |
def run_samtools_faidx(job, ref_id):
"""
Use Samtools to create reference index file
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str ref_id: FileStoreID for the reference genome
:return: FileStoreID for reference index
:rtype: str
"""
    job.fileStore.logToMaster('Creating reference index')
work_dir = job.fileStore.getLocalTempDir()
job.fileStore.readGlobalFile(ref_id, os.path.join(work_dir, 'ref.fasta'))
command = ['faidx', '/data/ref.fasta']
dockerCall(job=job, workDir=work_dir, parameters=command,
tool='quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e')
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'ref.fasta.fai')) | 2,903 |
def test_gdalversion_class_runtime():
"""Test the version of GDAL from this runtime"""
    assert GDALVersion.runtime().major >= 1
def pad_to_multiple(array: Array,
factor: int,
axis: int,
mode: Optional[str] = 'constant',
constant_values=0) -> Array:
"""Pads `array` on a given `axis` to be a multiple of `factor`.
Padding will be concatenated to the end of the axis only, not the beginning.
If the length along `axis` is already a multiple of `factor`, this is
effectively a no-op.
Args:
array: Array with rank >= 1 to pad.
factor: Positive integer factor to pad for.
axis: A valid axis in `array` to pad.
mode: The padding mode to use according to `jnp.pad`. Defaults to
'constant'. See `jax.numpy.pad` documentation for more.
constant_values: For 'constant' mode, the pad value to use within `jnp.pad`.
Defaults to 0.
Returns:
The padded Array result.
"""
array = jnp.asarray(array)
if factor < 1:
raise ValueError(f'`factor` must be positive but got {factor}.')
rank = array.ndim
if axis < -rank or axis >= rank:
raise ValueError(
f'`axis` ({axis}) out of bounds for `array` rank ({rank}).')
axis_len = array.shape[axis]
pad_len = -axis_len % factor
pad_width = [(0, 0)] * rank
pad_width[axis] = (0, pad_len)
kwargs = {}
if mode == 'constant':
kwargs['constant_values'] = constant_values
return jnp.pad(array=array, pad_width=pad_width, mode=mode, **kwargs) | 2,905 |
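A minimal usage sketch for pad_to_multiple, assuming JAX is installed; the array shape and factor below are illustrative:

import jax.numpy as jnp

x = jnp.ones((3, 5))                        # length 5 along axis 1
padded = pad_to_multiple(x, factor=4, axis=1)
print(padded.shape)                         # (3, 8): zero-padded up to the next multiple of 4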
def easy2dict(config: easydict.EasyDict):
"""
    :param config: EasyDict configuration to convert
    """
    # Bug fix: cfg = dict(config) only converts the outermost level of the EasyDict
cfg = json.loads(json.dumps(config))
return cfg | 2,906 |
def define_request(
dataset,
query=None,
crs="epsg:4326",
bounds=None,
bounds_crs="EPSG:3005",
sortby=None,
pagesize=10000,
):
"""Define the getfeature request parameters required to download a dataset
References:
- http://www.opengeospatial.org/standards/wfs
- http://docs.geoserver.org/stable/en/user/services/wfs/vendor.html
- http://docs.geoserver.org/latest/en/user/tutorials/cql/cql_tutorial.html
"""
# validate the table name and find out how many features it holds
table = validate_name(dataset)
n = bcdata.get_count(table, query=query)
wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0")
geom_column = wfs.get_schema("pub:" + table)["geometry_column"]
# DataBC WFS getcapabilities says that it supports paging,
# and the spec says that responses should include 'next URI'
# (section 7.7.4.4.1)....
# But I do not see any next uri in the responses. Instead of following
# the paged urls, for datasets with >10k records, just generate urls
# based on number of features in the dataset.
chunks = math.ceil(n / pagesize)
# if making several requests, we need to sort by something
if chunks > 1 and not sortby:
sortby = get_sortkey(table)
# build the request parameters for each chunk
param_dicts = []
for i in range(chunks):
request = {
"service": "WFS",
"version": "2.0.0",
"request": "GetFeature",
"typeName": table,
"outputFormat": "json",
"SRSNAME": crs,
}
if sortby:
request["sortby"] = sortby
# build the CQL based on query and bounds
# (the bbox param shortcut is mutually exclusive with CQL_FILTER)
if query and not bounds:
request["CQL_FILTER"] = query
if bounds:
b0, b1, b2, b3 = [str(b) for b in bounds]
bnd_query = f"bbox({geom_column}, {b0}, {b1}, {b2}, {b3}, '{bounds_crs}')"
if not query:
request["CQL_FILTER"] = bnd_query
else:
request["CQL_FILTER"] = query + " AND " + bnd_query
if chunks > 1:
request["startIndex"] = i * pagesize
request["count"] = pagesize
param_dicts.append(request)
return param_dicts | 2,907 |
def get_bert_input(
examples: List[tuple],
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Convert input list to torch tensor.
Args:
        examples: (input_id_list, token_type_id_list)
Returns:
attention_mask, input_ids_tensor, token_type_ids_tensor
"""
input_ids = examples[0]
token_type_ids = examples[1]
max_seq_len = min(max(len(input_id) for input_id in input_ids), MAX_SEQ_LEN)
input_ids_tensor = torch.zeros((len(input_ids), max_seq_len), dtype=torch.long)
token_type_ids_tensor = torch.zeros_like(input_ids_tensor)
attention_mask = torch.ones_like(input_ids_tensor)
for i, input_id in enumerate(input_ids):
cur_seq_len = len(input_id)
if cur_seq_len <= max_seq_len:
input_ids_tensor[i, :cur_seq_len] = torch.tensor(input_id, dtype=torch.long)
token_type_ids_tensor[i, :cur_seq_len] = torch.tensor(
token_type_ids[i], dtype=torch.long
)
attention_mask[i, cur_seq_len:] = 0
else:
input_ids_tensor[i] = torch.tensor(
input_id[: max_seq_len - 1] + [102], dtype=torch.long
)
token_type_ids_tensor[i] = torch.tensor(
token_type_ids[i][:max_seq_len], dtype=torch.long
)
return attention_mask, input_ids_tensor, token_type_ids_tensor | 2,908 |
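A small usage sketch for get_bert_input; MAX_SEQ_LEN is assumed to be a module-level constant and is set here only to keep the sketch self-contained, and the token ids are illustrative:

import torch

MAX_SEQ_LEN = 512  # assumed module-level constant used by get_bert_input

input_ids = [[101, 2023, 2003, 102], [101, 2748, 102]]
token_type_ids = [[0, 0, 0, 0], [0, 0, 0]]
mask, ids, types = get_bert_input((input_ids, token_type_ids))
print(ids.shape)   # torch.Size([2, 4]) -- padded to the longest sequence in the batch
print(mask[1])     # tensor([1, 1, 1, 0]) -- padding positions are masked out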
def _format_exception(e: BaseException):
"""
Shamelessly stolen from stdlib's logging module.
"""
with io.StringIO() as sio:
traceback.print_exception(e.__class__, e, e.__traceback__, None, sio)
return sio.getvalue().strip() | 2,909 |
def batch_deploy(blueprint_id,
parent_deployments,
group_id=None,
new_deployment_ids=None,
inputs=None,
labels=None,
**_):
"""
Create deployments for a batch from a single blueprint.
:param blueprint_id: The blueprint, which has already been uploaded.
:type blueprint_id: str
:param parent_deployments: A list of parent deployments.
:type parent_deployments: list
:param group_id: the new group ID.
:type group_id: str
:param new_deployment_ids: a list of new deployment names.
:type new_deployment_ids: list
:param inputs: A list of inputs to the new deployments.
:type inputs: list
:param labels: A list of labels to the new deployments.
:type labels: list
:return: group_id
:rtype: str
"""
if not isinstance(parent_deployments, list):
# If someone sends a list in the CLI,
# it will not be properly formatted.
try:
parent_deployments = json.loads(parent_deployments)
except json.JSONDecodeError:
raise NonRecoverableError(
'The parent_deployments parameter is not properly formatted. '
'Proper format is a list, a {t} was provided: {v}.'.format(
t=type(parent_deployments), v=parent_deployments))
group_id = group_id or generate_group_id_from_blueprint(
blueprint_id)
new_deployment_ids = new_deployment_ids or \
generate_deployment_ids_from_group_id(group_id, parent_deployments)
inputs = generate_inputs_from_deployments(inputs, parent_deployments)
labels = labels or generate_labels_from_inputs(inputs)
create_deployments(
group_id,
blueprint_id,
new_deployment_ids,
inputs,
labels)
return group_id | 2,910 |
def compute_task_def(build, settings, fake_build):
"""Returns a swarming task definition for the |build|.
Args:
build (model.Build): the build to generate the task definition for.
build.proto.infra and build.proto.input.properties must be initialized.
settings (service_config_pb2.SettingsCfg): global settings.
fake_build (bool): False if the build is not going to be actually
created in buildbucket. This is used by led that only needs the definition
of the task that *would be* used for a new build like this.
Returns a task_def dict.
Corresponds to JSON representation of
https://cs.chromium.org/chromium/infra/luci/appengine/swarming/swarming_rpcs.py?q=NewTaskRequest&sq=package:chromium&g=0&l=438
"""
assert isinstance(build, model.Build), type(build)
assert isinstance(fake_build, bool), type(fake_build)
assert build.proto.HasField('infra')
assert build.proto.input.HasField('properties')
assert isinstance(settings, service_config_pb2.SettingsCfg)
sw = build.proto.infra.swarming
task = {
'name': 'bb-%d-%s' % (build.proto.id, build.builder_id),
'tags': _compute_tags(build, settings),
'priority': str(sw.priority),
'task_slices': _compute_task_slices(build, settings),
}
if build.proto.number: # pragma: no branch
task['name'] += '-%d' % build.proto.number
if sw.task_service_account: # pragma: no branch
# Don't pass it if not defined, for backward compatibility.
task['service_account'] = sw.task_service_account
if not fake_build: # pragma: no branch | covered by swarmbucketapi_test.py
task['pubsub_topic'] = 'projects/%s/topics/swarming' % (
app_identity.get_application_id()
)
task['pubsub_userdata'] = json.dumps(
{
'build_id': build.proto.id,
'created_ts': utils.datetime_to_timestamp(utils.utcnow()),
'swarming_hostname': sw.hostname,
},
sort_keys=True,
)
return task | 2,911 |
def parse_markdown(page, target=None, pages=None, categories=[], mode="html",
current_time="", bypass_errors=False):
"""Takes a page object (must contain "md" attribute) and returns parsed
and filtered HTML."""
target = get_target(target)
logger.info("Preparing page %s" % page["name"])
# We'll apply these filters to the page
page_filters = get_filters_for_page(page, target)
logger.debug("Filters for page {pg}: {fl}".format(
pg=page["name"], fl=page_filters))
# Get the markdown, preprocess, and apply md filters
try:
md = preprocess_markdown(page,
target=target,
categories=categories,
mode=mode,
current_time=current_time,
page_filters=page_filters,
bypass_errors=bypass_errors,
)
except Exception as e:
traceback.print_tb(e.__traceback__)
recoverable_error("Couldn't preprocess markdown for page %s: %s(%s)" %
(page["name"], repr(e), str(e)), bypass_errors)
# Just fetch the md without running the preprocessor
md = preprocess_markdown(page,
target=target,
categories=categories,
mode=mode,
current_time=current_time,
page_filters=page_filters,
bypass_errors=bypass_errors,
skip_preprocessor=True
)
# Actually parse the markdown
logger.info("... parsing markdown...")
html = markdown(md, extensions=["markdown.extensions.extra",
"markdown.extensions.toc"],
lazy_ol=False)
# Apply raw-HTML-string-based filters here
for filter_name in page_filters:
if "filter_html" in dir(config.filters[filter_name]):
logger.info("... applying HTML filter %s" % filter_name)
html = config.filters[filter_name].filter_html(
html,
currentpage=page,
categories=categories,
pages=pages,
target=target,
current_time=current_time,
mode=mode,
config=config,
logger=logger,
)
# Some filters would rather operate on a soup than a string.
# May as well parse once and re-serialize once.
soup = BeautifulSoup(html, "html.parser")
# Apply soup-based filters here
for filter_name in page_filters:
if "filter_soup" in dir(config.filters[filter_name]):
logger.info("... applying soup filter %s" % filter_name)
config.filters[filter_name].filter_soup(
soup,
currentpage=page,
categories=categories,
pages=pages,
target=target,
current_time=current_time,
mode=mode,
config=config,
logger=logger,
)
# ^ the soup filters apply to the same object, passed by reference
logger.info("... re-rendering HTML from soup...")
html2 = str(soup)
return html2 | 2,912 |
def draw_bs_pairs(x, y, func, size=1):
"""Perform pairs bootstrap for replicates."""
# Set up array of indices to sample from: inds
inds = np.arange(len(x))
# Initialize replicates
bs_replicates = np.empty(size)
# Generate replicates
for i in range(size):
bs_inds = np.random.choice(inds, len(inds))
bs_x, bs_y = x[bs_inds], y[bs_inds]
bs_replicates[i] = func(bs_x, bs_y)
return bs_replicates | 2,913 |
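A minimal usage sketch for draw_bs_pairs with a Pearson correlation statistic; the synthetic data below is illustrative:

import numpy as np

def pearson_r(x, y):
    # Pearson correlation coefficient as the pair statistic (illustrative choice)
    return np.corrcoef(x, y)[0, 1]

x = np.random.normal(size=50)
y = 2 * x + np.random.normal(scale=0.5, size=50)
reps = draw_bs_pairs(x, y, pearson_r, size=1000)
print(reps.mean(), np.percentile(reps, [2.5, 97.5]))  # bootstrap mean and 95% interval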
def has_hole(feature):
"""
Detects the number of holes in a shapely polygon or multipolygon.
Parameters
----------
feature : shapely Polygon or Multipolygon
polygon to be analyzed for holes
Returns
-------
int
number of holes
"""
    if feature.geom_type == 'Polygon':
        num_holes = len(feature.interiors)
    elif feature.geom_type == 'MultiPolygon':
        num_holes = np.sum([len(x.interiors) for x in feature.geoms])
    else:
        raise ValueError('Unsupported geometry type: {}'.format(feature.geom_type))
    return num_holes | 2,914 |
def linder_table(file=None, **kwargs):
"""Load Linder Model Table
Function to read in isochrone models from Linder et al. 2019.
Returns an astropy Table.
Parameters
----------
age : float
Age in Myr. If set to None, then an array of ages from the file
is used to generate dictionary. If set, chooses the closest age
supplied in table.
file : string
        Location and name of the Linder et al. (2019) BEX isochrone file.
        Default is BEX_evol_mags_-3_MH_0.00.dat in the pynrc
        linder/isochrones directory.
"""
# Default file to read and load
if file is None:
base_dir = conf.PYNRC_PATH + 'linder/isochrones/'
file = base_dir + 'BEX_evol_mags_-3_MH_0.00.dat'
with open(file) as f:
content = f.readlines()
content = [x.strip('\n') for x in content]
cnames = content[2].split(',')
cnames = [name.split(':')[1] for name in cnames]
ncol = len(cnames)
content_arr = []
for line in content[4:]:
        arr = np.array(line.split()).astype(float)
if len(arr)>0:
content_arr.append(arr)
content_arr = np.array(content_arr)
# Convert to Astropy Table
tbl = Table(rows=content_arr, names=cnames)
return tbl | 2,915 |
def json_redirect(request, url, **kwargs):
"""
Returns a JSON response for redirecting to a new URL. This is very specific
to this project and depends on the JavaScript supporting the result that
is returned from this method.
"""
if not request.is_ajax():
raise PermissionDenied("Must be an AJAX request.")
return JsonResponse({'url': url}, **kwargs) | 2,916 |
def unauthenticatedClient():
"""Retorna um api client sem ninguém autenticado"""
return APIClient() | 2,917 |
def filters_to_kcorrect(curve_file, verbose=False):
"""
Convert a filter response curve to the Kcorrect format.
This is used by Kcorrect and iSEDFit.
"""
if not os.path.isfile(curve_file):
raise IOError("# Cannot find the response curve file {}".format(curve_file))
# Read in the .txt response curve
wave, response = np.genfromtxt(curve_file, usecols=(0, 1), unpack=True)
# Output file name
prefix, _ = os.path.splitext(curve_file)
output_par = prefix + '.par'
if os.path.isfile(output_par):
if verbose:
print("# Curve {0} is already available".format(output_par))
else:
assert len(wave) == len(response), '''
Wavelength and response curve should have the same size'''
par = open(output_par, 'w')
        par.write(
            "# %s\n typedef struct {\n double lambda;\n double pass;\n } KFILTER;\n\n"
            % os.path.basename(curve_file))
for w, r in zip(wave, response):
par.write("KFILTER %10.4f %11.7f\n" % (w, r))
par.close()
return wave, response | 2,918 |
def get_known_disk_attributes(model):
"""Get known NVMe/SMART attributes (model specific), returns str."""
known_attributes = KNOWN_DISK_ATTRIBUTES.copy()
# Apply model-specific data
for regex, data in KNOWN_DISK_MODELS.items():
if re.search(regex, model):
for attr, thresholds in data.items():
if attr in known_attributes:
known_attributes[attr].update(thresholds)
else:
known_attributes[attr] = thresholds
# Done
return known_attributes | 2,919 |
def table_drop_nan_columns(table: Path, output_path: Path) -> None:
"""
Drop columns with only null values from the table.
Arguments:
table: Location of the input table.
output_path: Location of the output table.
"""
with open_file_like(table, mode="r") as fd:
reader = csv.reader(line_reader(fd, skip_empty=True))
column_names = {idx: name for idx, name in enumerate(next(reader))}
# Perform a linear sweep to look for columns without a single non-null value
not_nan_columns = set()
for record in reader:
for idx, value in enumerate(record):
if value is not None and value != "":
not_nan_columns.add(idx)
# Rewind the input's buffer if possible, in case we were given a readable stream
if hasattr(table, "seek"):
table.seek(0)
else:
# We have to open and read the file twice: first to look for NaN columns and second to
# remove them. This means that if a file handle is passed to this function and seek is
# not supported, it will not work.
assert isinstance(
table, (Path, str)
), f"Input table must be a path-like object, found {type(table)}"
# Remove all null columns and write output
nan_columns = [idx for idx in column_names.keys() if idx not in not_nan_columns]
table_rename(table, output_path, {column_names[idx]: None for idx in nan_columns}) | 2,920 |
def get_one_exemplar_per_class_proximity(proximity):
"""
unpack proximity object into X, y and random_state for picking exemplars.
----
Parameters
----
proximity : Proximity object
Proximity like object containing the X, y and random_state variables
required for picking exemplars.
----
Returns
----
result : function
function choosing one exemplar per class
"""
return get_one_exemplar_per_class(proximity.X, proximity.y, proximity.random_state) | 2,921 |
def annotation_layers(state):
"""Get all annotation layer names in the state
Parameters
----------
state : dict
Neuroglancer state as a JSON dict
Returns
-------
names : list
List of layer names
"""
return [l["name"] for l in state["layers"] if l["type"] == "annotation"] | 2,922 |
def describe_bvals(bval_file) -> str:
"""Generate description of dMRI b-values."""
# Parse bval file
with open(bval_file, "r") as file_object:
raw_bvals = file_object.read().splitlines()
# Flatten list of space-separated values
bvals = [
item for sublist in [line.split(" ") for line in raw_bvals] for item in sublist
]
bvals = sorted([int(v) for v in set(bvals)])
bvals = [num_to_str(v) for v in bvals]
bval_str = list_to_str(bvals)
bval_str = "b-values of {} acquired".format(bval_str)
return bval_str | 2,923 |
def selectionToAbsoluteNames(selection, permissive=False):
"""
Generator that converts selected nodes to long names.
i.e. absolute paths for dag nodes or instances and names for dependency (non-dag) nodes.
"selection" can either be a MSelectionList or an iterable of nodes.
    if permissive, invalid node names (strings) are kept.
"""
pass | 2,924 |
def test_working_filter(test_microvm_with_api):
"""
Test --seccomp-filter, rejecting some dangerous syscalls.
@type: security
"""
test_microvm = test_microvm_with_api
_custom_filter_setup(test_microvm, """{
"Vmm": {
"default_action": "allow",
"filter_action": "kill_process",
"filter": [
{
"syscall": "clone"
},
{
"syscall": "execve"
}
]
},
"Api": {
"default_action": "allow",
"filter_action": "kill_process",
"filter": [
{
"syscall": "clone"
},
{
"syscall": "execve"
}
]
},
"Vcpu": {
"default_action": "allow",
"filter_action": "kill_process",
"filter": [
{
"syscall": "clone"
},
{
"syscall": "execve",
"comment": "sample comment"
}
]
}
}""".encode("utf-8"))
test_microvm.spawn()
test_microvm.basic_config()
test_microvm.start()
# level should be 2, with no additional errors
utils.assert_seccomp_level(test_microvm.jailer_clone_pid, "2") | 2,925 |
def weight_update4(weights, x_white, bias1, lrate1, b_exp):
""" Update rule for infomax
    This function receives parameters to update W1
* Input
weights : unmixing matrix (must be a square matrix)
x_white: whitened data
bias1: current estimated bias
lrate1: current learning rate
b_exp : experiment
* Output
weights : updated mixing matrix
bias: updated bias
lrate1: updated learning rate
"""
NCOMP, NVOX = (x_white.shape)
block1 = (int(np.floor(np.sqrt(NVOX / 3))))
last1 = (int(np.fix((NVOX/block1-1)*block1+1)))
if not b_exp :
permute1 = permutation(NVOX)
else :
permute1 = range(NVOX)
for start in range(0, last1, block1):
if start + block1 < NVOX:
tt2 = (start + block1 )
else:
tt2 = (NVOX)
block1 = (NVOX - start)
unmixed = (np.dot(weights, x_white[:, permute1[start:tt2]]) + bias1)
logit = 1 / (1 + np.exp(-unmixed))
weights = (weights + lrate1 * np.dot(
block1 * np.eye(NCOMP) + np.dot( (1-2*logit), unmixed.T), weights))
bias1 = (bias1 + lrate1 * (1-2*logit).sum(axis=1).reshape(bias1.shape))
# Checking if W blows up
if (np.isnan(weights)).any() or np.max(np.abs(weights)) > MAX_WEIGHT:
# ("Weight is outside the range. Restarting.")
weights = (np.eye(NCOMP))
bias1 = (np.zeros((NCOMP, 1)))
error = 1
if lrate1 > 1e-6 and \
matrix_rank(x_white) < NCOMP:
a = 1
# ("Data 1 is rank defficient"
# ". I cannot compute " +
# str(NCOMP) + " components.")
return (None, None, None, 1)
if lrate1 < 1e-6:
a = 1
# ("Weight matrix may"
# " not be invertible...")
return (None, None, None, 1)
break
else:
error = 0
return (weights, bias1, lrate1, error) | 2,926 |
def Dijkstra(graph, source):
"""
Dijkstra's algorithm for shortest path between two vertices on a graph.
Arguments
---------
graph -- directed graph; object of Graph class
source -- start vertex
>>> graph = Graph()
>>> graph.addVertex("A")
>>> conns = [ ("A", "B"), ("A", "C"), ("B", "C"), ("C", "D") ]
>>> for va, vb in conns:
... graph.addConn(va, vb)
>>> dists = Dijkstra(graph, 'A')
>>> dists['D']
2
"""
dist = {}
pq = pQ.BinaryHeap()
for node in graph:
if node != source:
dist[node] = float('inf')
else:
dist[node] = 0
pq.insert((dist[node], node))
while not pq.isEmpty():
current = pq.delMin()
for next_node in graph.getConns(current[1]):
new_dist = current[0] + 1
if new_dist < dist[next_node]:
dist[next_node] = new_dist
pq.editHeap(next_node, (dist[next_node], next_node))
return dist | 2,927 |
def update_local(base, new_path):
"""On some systems virtualenv seems to have something like a local
directory with symlinks. It appears to happen on debian systems and
    it causes havoc if not updated. So do that.
"""
local_dir = os.path.join(base, 'local')
if not os.path.isdir(local_dir):
return
for folder in 'bin', 'lib', 'include':
filename = os.path.join(local_dir, folder)
target = '../%s' % folder
if os.path.islink(filename) and os.readlink(filename) != target:
os.remove(filename)
os.symlink('../%s' % folder, filename)
print('L %s' % filename) | 2,928 |
def handle_post_actor_report(self, handle, connection, match, data, hdr):
"""
POST /actor/{actor-id}/report
Some actors accept external input using this function. Not always present.
Response status code: OK or NOT_FOUND
Response: Depends on actor
"""
self._actor_report(handle, connection, match, data, hdr) | 2,929 |
def check_data_dir(path):
"""
    check that the data directory path exists
    """
    err = "Data path {} does not exist, please provide a valid path".format(path)
    try:
        assert os.path.isdir(path)
except AssertionError:
logger.error(err)
sys.exit(1) | 2,930 |
def test_find_number_max_repeating():
"""
Find the number which is repeated the largest number of times
"""
t = MapReduceTask(verbose=True, lazy=False)
# the order matters
@t.map
def m1(k, v):
yield v, 1
@t.reduce
def r1(k, v):
yield k, sum(v)
@t.map
def m2(k, v):
yield 'all', (k, v)
@t.reduce
def r2(k, v):
km, vm = None, None
for ki, vi in v:
if vm is None or vi > vm:
km, vm = ki, vi
yield 'max', (km, vm)
x = [1,2,3,1,2,1,4,5,6]
# print newline, so the output will be on the new line when run by pytest
print('')
assert list(t(x)) == [('max', (1, 3))] | 2,931 |
def annealing_epsilon(episode: int, min_e: float, max_e: float, target_episode: int) -> float:
"""Return an linearly annealed epsilon
Epsilon will decrease over time until it reaches `target_episode`
(epsilon)
|
max_e ---|\
| \
| \
| \
min_e ---|____\_______________(episode)
|
target_episode
slope = (min_e - max_e) / (target_episode)
intercept = max_e
e = slope * episode + intercept
Args:
episode (int): Current episode
min_e (float): Minimum epsilon
max_e (float): Maximum epsilon
target_episode (int): epsilon becomes the `min_e` at `target_episode`
Returns:
float: epsilon between `min_e` and `max_e`
"""
slope = (min_e - max_e) / (target_episode)
intercept = max_e
return max(min_e, slope * episode + intercept) | 2,932 |
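A small usage sketch for annealing_epsilon; the schedule parameters are illustrative:

for episode in (0, 50, 100, 200):
    eps = annealing_epsilon(episode, min_e=0.1, max_e=1.0, target_episode=100)
    print(episode, round(eps, 2))   # 1.0, 0.55, 0.1, then clamped at 0.1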
def ExtendWithDefault(validator_class):
"""Takes a validator and makes it set default values on properties.
Args:
validator_class: A class to add our overridden validators to
Returns:
A validator_class that will set default values
and ignore required fields
"""
validate_properties = validator_class.VALIDATORS['properties']
def SetDefaultsInProperties(validator, user_schema, user_properties,
parent_schema):
SetDefaults(validator, user_schema or {}, user_properties,
parent_schema, validate_properties)
return jsonschema.validators.extend(
validator_class, {PROPERTIES: SetDefaultsInProperties,
REQUIRED: IgnoreKeyword}) | 2,933 |
def coach_input_line(call, school, f):
"""
Returns a properly formatted line about a coach.
:param call: (String) The beginning of the line, includes the gender, sport, and school abbreviation.
:param school:(String) The longform name of the school.
:param f: (String) The input line from the user.
:return: (String) A properly formatted line with all necessary information about a coach.
"""
f = f.split("\t")
newCall = f[2].split(" ")
for item in newCall:
call += item[0].lower()
print(call)
print(f[2])
return f"{call}\t{school}'s {coachformat(f[2])}, {f[0]} {f[1]},\t{f[0]} {f[1]},\t{f[1]}\n" | 2,934 |
def pad_seq(seq, max_length, PAD=0):
"""
:param seq: list of int,
:param max_length: int,
:return seq: list of int,
"""
seq += [PAD for i in range(max_length - len(seq))]
return seq | 2,935 |
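A minimal usage sketch for pad_seq; the sequences and lengths are illustrative:

print(pad_seq([5, 7, 9], max_length=6))        # [5, 7, 9, 0, 0, 0]
print(pad_seq([1, 2], max_length=4, PAD=-1))   # [1, 2, -1, -1]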
def test_import_protobuf():
"""
Ensure the generated protobuf file is successfully importable in a dev environment.
"""
from foxglove_websocket.examples.proto.ExampleMsg_pb2 import ExampleMsg
_ = ExampleMsg | 2,936 |
def complex_domain(spectrogram):
"""
Complex Domain.
Parameters
----------
spectrogram : :class:`Spectrogram` instance
:class:`Spectrogram` instance.
Returns
-------
complex_domain : numpy array
Complex domain onset detection function.
References
----------
.. [1] Juan Pablo Bello, Chris Duxbury, Matthew Davies and Mark Sandler,
"On the use of phase and energy for musical onset detection in the
complex domain",
IEEE Signal Processing Letters, Volume 11, Number 6, 2004.
"""
# take the sum of the absolute changes
return np.asarray(np.sum(np.abs(_complex_domain(spectrogram)), axis=1)) | 2,937 |
def toOneHot(action_space, actions):
"""
If action_space is "Discrete", return a one hot vector, otherwise just return the same `actions` vector.
actions: [batch_size, 1] or [batch_size, n, 1]
If action space is continuous, just return the same action vector.
"""
# One hot encoding buffer that you create out of the loop and just keep reusing
if action_space.__class__.__name__ == "Discrete":
nr_actions = action_space.n
actions_onehot_dim = list(actions.size())
actions_onehot_dim[-1] = nr_actions
actions = actions.view(-1, 1).long()
action_onehot = torch.FloatTensor(actions.size(0), nr_actions)
return_variable = False
if isinstance(actions, Variable):
actions = actions.data
return_variable = True
        # Zero the reusable one-hot buffer before scattering
action_onehot.zero_()
if actions.is_cuda:
action_onehot = action_onehot.cuda()
action_onehot.scatter_(1, actions, 1)
if return_variable:
action_onehot = Variable(action_onehot)
        action_onehot = action_onehot.view(*actions_onehot_dim)
return action_onehot
else:
return actions.detach() | 2,938 |
def find_triangle(n):
"""Find the first triangle number with N divisors."""
t, i = 1, 1
while True:
i += 1
t += i
if len(divisors(t)) > n:
return t | 2,939 |
def get_main_page_soup(home_url):
""" parse main page soup"""
user_agent= 'Mozilla / 5.0 (Windows NT 10.0; Win64; x64) AppleWebKit / 537.36(KHTML, ' \
'like Gecko) Chrome / 64.0.3282.140 Safari / 537.36 Edge / 18.17763 '
headers = {'User-agent':user_agent}
# request to javbus
res = requests.get(home_url, headers=headers, timeout=20)
res.raise_for_status()
# init beautiful soup
soup = bs4.BeautifulSoup(res.text, 'lxml')
return soup | 2,940 |
def gen_task4() -> tuple:
"""Task 4: main corner of a triangle."""
canv = blank_canvas()
r, c = np.random.randint(GRID-2, size=2, dtype=np.int8)
syms = rand_syms(6) # 6 symbols for triangle
# Which orientation? We'll create 4
rand = np.random.rand()
if rand < 0.25:
# top left
rows, cols = [r, r, r, r+1, r+1, r+2], [c, c+1, c+2, c, c+1, c]
elif rand < 0.50:
# top right
rows, cols = [r, r, r, r+1, r+1, r+2], [c+2, c, c+1, c+1, c+2, c+2]
elif rand < 0.75:
# bottom left
rows, cols = [r+2, r, r+1, r+1, r+2, r+2], [c, c, c, c+1, c+1, c+2]
else:
# bottom right
rows, cols = [r+2, r, r+1, r+1, r+2, r+2], [c+2, c+2, c+1, c+2, c, c+1]
canv[rows, cols] = syms
return [4, syms[0]], canv | 2,941 |
def pytest_funcarg__testname(request):
"""
The testname as string, or ``None``, if no testname is known.
This is the parameter added by the test generation hook, or ``None`` if no
parameter was set, because test generation didn't add a call for this test.
"""
return getattr(request, 'param', None) | 2,942 |
def try_log_conf_file(file_path: pathlib.Path) -> bool:
"""It tries to open a log configuration file.
    file_path: path to a JSON logging configuration file
    return: boolean (True if it succeeds, False otherwise)
"""
global logger
try:
with file_path.open() as f:
logger_conf = json.load(f)
logging.config.dictConfig(logger_conf)
logger = logging.getLogger(__name__)
logger.debug("logger started from %s", str(pathlib.Path.cwd()))
logger.info("%s found", str(file_path))
return True
except FileNotFoundError as e:
logger.info("%s not found: %s", str(file_path), str(e))
return False | 2,943 |
def combine_specific_viz_ids_pics(srcs: List[str], out: str = None, setup: List[str] = ('base', 'hsc', 'ae'),
skip_further=False, only_cls: List[int] = None):
"""
Combines heatmap images (visualization ids) for several old experiments for the same input images.
Depending on the setup, it creates an image with input images at the top and heatmap images below, where
each row corresponds to one experiment and each column to one input.
A row can also contain ground-truth heatmaps.
The combined heatmap images are stored on the disk according to the out parameter.
:param srcs: paths to root directories of old experiments
:param out: directory in which to put the combined images (class and seed-wise)
:param setup: types of experiments/rows, need to be in the order of srcs, each element has to be in OPTIONS.
"base": FCDD experiment, always needs to be the first element of setup!
"hsc": HSC experiment with gradient heatmaps.
"ae": Autoencoder experiment with reconstruction loss heatmaps.
"gts": Ground-truth heatmaps.
:param skip_further: if an experiment has more than one type of heatmap images, i.e. its logged images
contain more than 2 rows (first row is always input), consider only the first type of heatmap.
:param only_cls: list of classes, classes not part of the list are skipped, None means no classes are skipped
:return:
"""
# TODO get rid of setup?
assert all([s in OPTIONS for s in setup])
assert setup[0] == 'base'
if 'gts' in setup:
assert setup[-1] == 'gts'
if out is None:
out = srcs[0] + '_COMBINED_PAPER_PICS'
if len(srcs) != len(setup):
raise ValueError(
'fixed len of src required, {}, but found {}!'
.format(' '.join(['({}) {}'.format(i + 1, s) for i, s in enumerate(setup)]), len(srcs))
)
pics = {}
for n, src in enumerate(srcs):
cls_labels = [pt.join(src, c) for c in os.listdir(src)]
cls_labels.sort(key=pt.getmtime)
cls_labels = [pt.basename(c) for c in cls_labels]
if all([c.startswith('it_') for c in cls_labels if pt.isdir(pt.join(src, c))]): # one class experiment
cls_labels = ['.']
for cls_dir in cls_labels:
if not pt.isdir(pt.join(src, cls_dir)):
continue
assert cls_dir.startswith('normal_')
if only_cls is not None and len(only_cls) > 0 and int(cls_dir[7:]) not in only_cls:
continue
print('collecting pictures of {} {}...'.format(src, cls_dir))
for it_dir in os.listdir(pt.join(src, cls_dir)):
if pt.isfile(pt.join(src, cls_dir, it_dir)):
continue
cfg = read_cfg(pt.join(src, cls_dir, it_dir, 'config.txt'))
tims_dir = pt.join(src, cls_dir, it_dir, 'tims')
if n == 0:
if pt.exists(pt.join(tims_dir, 'specific_viz_ids')):
raise ValueError(
                        'First src should not contain specific viz ids, as first src should be the base!')
for root, dirs, files in os.walk(tims_dir):
for f in files:
assert f[-4:] == '.pth'
if cls_dir not in pics:
pics[cls_dir] = {}
if it_dir not in pics[cls_dir]:
pics[cls_dir][it_dir] = {}
pics[cls_dir][it_dir][f[:-4]] = [torch.load(pt.join(root, f))]
else:
if not pt.exists(pt.join(tims_dir, 'specific_viz_ids')):
raise ValueError('Src {} should contain specific viz ids, but it doesnt!'.format(src))
for root, dirs, files in os.walk(pt.join(tims_dir, 'specific_viz_ids')):
for f in files:
assert f[-4:] == '.pth'
if cls_dir == '.' and cls_dir not in pics:
warnings.warn('Seems that src {} is a one class experiment...'.format(src))
cls = 'normal_{}'.format(cfg['normal_class'])
else:
cls = cls_dir
if cls not in pics or it_dir not in pics[cls]:
raise ValueError('{} {} is missing in base src!!'.format(cls_dir, it_dir))
if setup[n] in ('ae', ):
if not f.startswith('ae_'):
continue
pics[cls][it_dir][f[3:-4]].append(torch.load(pt.join(root, f)))
else:
if f.startswith('ae_'):
raise ValueError(
'ae has been found in position {}, but shouldnt be!'.format(n)
)
pics[cls][it_dir][f[:-4]].append(torch.load(pt.join(root, f)))
logger = Logger(out)
for cls_dir in pics:
print('creating pictures for {} {}...'.format(out, cls_dir))
for it_dir in pics[cls_dir]:
for file in pics[cls_dir][it_dir]:
combined_pic = []
inps = []
gts = None
tensors = pics[cls_dir][it_dir][file]
if len(tensors) != len(srcs):
print(
'Some specific viz id tims are missing for {} {}!! Skipping them...'.format(cls_dir, it_dir),
file=sys.stderr
)
continue
# 0 == base src
t = tensors[0]
rows, cols, c, h, w = t.shape
inps.append(t[0])
if 'gts' in setup:
combined_pic.extend([*t[:2 if skip_further else -1]])
gts = t[-1]
else:
combined_pic.extend([*t[:2 if skip_further else 10000000000]])
for t in tensors[1:]:
rows, cols, c, h, w = t.shape
if rows == 3: # assume gts in final row
t = t[:-1]
inps.append(t[0])
combined_pic.append(t[1])
# ADD GTMAP
if gts is not None:
combined_pic.append(gts)
# check of all inputs have been the same
for i, s in enumerate(srcs):
for j, ss in enumerate(srcs):
if j <= i:
continue
if (inps[i] != inps[j]).sum() > 0:
raise ValueError('SRC {} and SRC {} have different inputs!!!'.format(srcs[i], srcs[j]))
# combine
new_cols = combined_pic[0].size(0)
tim = torch.cat(combined_pic)
logger.imsave(file, tim, nrow=new_cols, scale_mode='none', suffix=pt.join(cls_dir, it_dir))
print('Successfully combined pics in {}.'.format(out)) | 2,944 |
def notes_to_editor_view(notes):
"""Convert notes object content to more readble view
Args:
notes (list): list of note object
Returns:
list: list of note object
"""
for note in notes:
note.content = to_editor(note.content)
return notes | 2,945 |
def cozmo_program(robot: cozmo.robot.Robot):
"""
Main entry point for running the scoring logic in the capture the flag game.
:param robot: judge robot in the game
"""
# get number of teams playing in the game
while True:
try:
teams: int = int(input("How many teams are playing?"))
except ValueError:
print("Invalid input type")
continue
if teams < 2:
print("Must be between 2 and 3")
continue
elif teams > 3:
print("Must be between 1 and 3")
continue
else:
break
# get the id of the team the judge is on
while True:
try:
team_id: int = int(input("Which team is this judge on?"))
except ValueError:
print("Invalid input type")
continue
if team_id < 1:
print("Must be between 1 and 3")
continue
elif team_id > 3:
print("Must be between 1 and 3")
continue
else:
break
# get the corresponding team colors and opponent colors
team_colors, opponent_colors = get_team_colors(teams)
# set backpack color and head angle
robot.set_all_backpack_lights(team_colors[team_id])
robot.set_head_angle(cozmo.util.Angle(degrees=20))
# establish connection to the network and message retrieval
connection: socket.socket = start_connection("10.0.1.10", 5000)
message: List[str] = []
# setup the game
robot_cubes: List[LightCube] = setup(robot, opponent_colors[team_id])
# set default score
robot_score: int = 0
# continuously check the cube object held up to the judge and increment the score accordingly
while 'Exit' not in message:
captured_cube = None
# wait for one the cubes to be shown to the judge
try:
captured_cube = robot.world.wait_for_observed_light_cube(timeout=0.5)
except concurrent.futures._base.TimeoutError:
pass
# increment score and change cube color if the cube was valid and in-play
if captured_cube in robot_cubes:
captured_cube.set_lights(team_colors[team_id])
robot_cubes.remove(captured_cube)
robot_score += 1
# break out of the loop if the maximum score has been reached, cube wait doesn't let the loop terminate
if robot_score == 3:
break
message = receive_message(connection)
# print the win state and terminate based on scoring the maximum number of points or receiving the exit message
if robot_score == 3:
connection.send(b'Exit %d' % team_id)
print('You won!')
else:
print('Robot %s won!' % message[1]) | 2,946 |
def normalization(arr, normalize_mode, norm_range = [0,1]):
"""
Helper function: Normalizes the image based on the specified mode and range
Args:
arr: numpy array
normalize_mode: either "whiten", "normalize_clip", or "normalize" representing the type of normalization to use
norm_range: (Optional) Specifies the range for the numpy array values
Returns:
A normalized array based on the specifications
"""
# reiniating the batch_size dimension
if normalize_mode == "whiten":
return whiten(arr)
elif normalize_mode == "normalize_clip":
return normalize_clip(arr, norm_range = norm_range)
elif normalize_mode == "normalize":
return minmax_normalize(arr, norm_range = norm_range)
else:
        raise NotImplementedError("Please use the supported modes.") | 2,947 |
def draw_mask(img, mask, col, alpha=0.4, show_border=True, border_thick=0):
"""Visualizes a single binary mask."""
was_pil = isinstance(img, (Image.Image))
img = np.array(img)
img = img.astype(np.float32)
idx = np.nonzero(mask)
img[idx[0], idx[1], :] *= 1.0 - alpha
img[idx[0], idx[1], :] += alpha * col
if border_thick:
contours, hierarchy = cv2.findContours(
mask.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
cv2.drawContours(img, contours, -1, _WHITE, border_thick, cv2.LINE_AA)
img = img.astype(np.uint8)
return Image.fromarray(img) if was_pil else img | 2,948 |
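A minimal usage sketch for draw_mask, assuming Pillow and NumPy are available; the image, mask region, and output path are illustrative, and border drawing is left off so OpenCV is not exercised:

import numpy as np
from PIL import Image

img = Image.new("RGB", (64, 64), color=(30, 30, 30))
mask = np.zeros((64, 64), dtype=np.uint8)
mask[16:48, 16:48] = 1                                  # square region to highlight
out = draw_mask(img, mask, col=np.array([255, 0, 0]))   # red overlay, default alpha
out.save("mask_preview.png")                            # illustrative output path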
def build_md_page(page_info: parser.PageInfo) -> str:
"""Given a PageInfo object, return markdown for the page.
Args:
page_info: Must be a `parser.FunctionPageInfo`, `parser.ClassPageInfo`, or
`parser.ModulePageInfo`.
Returns:
Markdown for the page
Raises:
ValueError: if `page_info` is an instance of an unrecognized class
"""
if isinstance(page_info, parser.ClassPageInfo):
return ClassPageBuilder(page_info).build()
if isinstance(page_info, parser.FunctionPageInfo):
return FunctionPageBuilder(page_info).build()
if isinstance(page_info, parser.ModulePageInfo):
return ModulePageBuilder(page_info).build()
if isinstance(page_info, parser.TypeAliasPageInfo):
return TypeAliasPageBuilder(page_info).build()
raise ValueError(f'Unknown Page Info Type: {type(page_info)}') | 2,949 |
def print_all_errors():
""" prints all the errors in the db in a human friendly way"""
pass | 2,950 |
def transpose(data: NodeInput, input_order: NodeInput, name: Optional[str] = None) -> Node:
"""Return a node which transposes the data in the input tensor.
@param data: The input tensor to be transposed
@param input_order: Permutation of axes to be applied to the input tensor
@return Transpose node
"""
return _get_node_factory_opset1().create("Transpose", as_nodes(data, input_order)) | 2,951 |
def find_opposite_reader(card_reader_list, find):
"""Returns the card reader on the opposite side of the door for the card reader in find"""
for c in card_reader_list:
if c.room_a == find.room_b and c.room_b == find.room_a:
return c
raise (Exception("No reader on opposite side found")) | 2,952 |
def send_mail_to_admin(email_subject, email_body):
"""Send an email to the admin email address.
The email is sent to the ADMIN_EMAIL_ADDRESS set in feconf.py.
Args:
email_subject: str. Subject of the email.
email_body: str. Body (message) of the email.
"""
app_id = app_identity_services.get_application_id()
body = '(Sent from %s)\n\n%s' % (app_id, email_body)
system_name_email = '%s <%s>' % (
feconf.SYSTEM_EMAIL_NAME, feconf.SYSTEM_EMAIL_ADDRESS)
email_services.send_mail(
system_name_email, feconf.ADMIN_EMAIL_ADDRESS, email_subject,
body, body.replace('\n', '<br/>'), bcc_admin=False) | 2,953 |
def _download_file(remote_url, target):
"""
Accepts a URL, downloads the file to a given open file object.
This is a modified version of astropy.utils.data.download_file that
downloads to an open file object instead of a cache directory.
"""
from contextlib import closing
from astropy.extern.six.moves.urllib.request import urlopen, Request
from astropy.extern.six.moves.urllib.error import URLError, HTTPError
from astropy.utils.console import ProgressBarOrSpinner
from . import conf
timeout = conf.remote_timeout
download_block_size = 32768
try:
# Pretend to be a web browser (IE 6.0). Some servers that we download
# from forbid access from programs.
headers = {'User-Agent': 'Mozilla/5.0',
'Accept': ('text/html,application/xhtml+xml,'
'application/xml;q=0.9,*/*;q=0.8')}
req = Request(remote_url, headers=headers)
with closing(urlopen(req, timeout=timeout)) as remote:
# get size of remote if available (for use in progress bar)
info = remote.info()
size = None
if 'Content-Length' in info:
try:
size = int(info['Content-Length'])
except ValueError:
pass
dlmsg = "Downloading {0}".format(remote_url)
with ProgressBarOrSpinner(size, dlmsg) as p:
bytes_read = 0
block = remote.read(download_block_size)
while block:
target.write(block)
bytes_read += len(block)
p.update(bytes_read)
block = remote.read(download_block_size)
# Append a more informative error message to HTTPErrors, URLErrors.
except HTTPError as e:
e.msg = "{}. requested URL: {!r}".format(e.msg, remote_url)
raise
except URLError as e:
append_msg = (hasattr(e, 'reason') and hasattr(e.reason, 'errno') and
e.reason.errno == 8)
if append_msg:
msg = "{0}. requested URL: {1}".format(e.reason.strerror,
remote_url)
e.reason.strerror = msg
e.reason.args = (e.reason.errno, msg)
raise e
# This isn't supposed to happen, but occasionally a socket.timeout gets
    # through. It's supposed to be caught in `urllib2` and raised in this
# way, but for some reason in mysterious circumstances it doesn't. So
# we'll just re-raise it here instead.
except socket.timeout as e:
# add the requested URL to the message (normally just 'timed out')
e.args = ('requested URL {!r} timed out'.format(remote_url),)
raise URLError(e) | 2,954 |
def project_envs() -> None:
"""Projects Environments.""" | 2,955 |
def trigger_QUIT(code, origin, line, args, text):
"""
ID: QUIT
    Description: A client session is ended with a quit message. The server must close
the connection to a client which sends a QUIT message. If a "Quit
Message" is given, this will be sent instead of the default message,
the nickname. When netsplits (disconnecting of two servers) occur, the quit message
is composed of the names of two servers involved, separated by a
space. The first name is that of the server which is still connected
and the second name is that of the server that has become
disconnected.
Format: [<Quit message>]
"""
for channel in code.chan:
if origin.nick in channel:
del code.chan[channel][origin.nick]
tmp = {
'message': '',
'nick': origin.nick,
'time': int(time.time()),
'channel': 'QUIT'
}
code.logs['bot'].append(tmp) | 2,956 |
def tag_from_clark(name):
"""Get a human-readable variant of the XML Clark notation tag ``name``.
For a given name using the XML Clark notation, return a human-readable
variant of the tag name for known namespaces. Otherwise, return the name as
is.
"""
match = CLARK_TAG_REGEX.match(name)
if match and match.group("namespace") in NAMESPACES_REV:
args = {"ns": NAMESPACES_REV[match.group("namespace")], "tag": match.group("tag")}
return "%(ns)s:%(tag)s" % args
return name | 2,957 |
def build_k_indices(y, k_fold, seed):
"""
Randomly partitions the indices of the data set into k groups
Args:
y: labels, used for indexing
k_fold: number of groups after the partitioning
seed: the random seed value
Returns:
k_indices: an array of k sub-indices that are randomly partitioned
"""
num_rows = y.shape[0]
interval = int(num_rows / k_fold)
np.random.seed(seed)
indices = np.random.permutation(num_rows)
k_indices = [indices[k * interval: (k + 1) * interval] for k in range(k_fold)]
return np.array(k_indices) | 2,958 |
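A small usage sketch for build_k_indices; the label array and fold count are illustrative:

import numpy as np

y = np.arange(10)                        # ten labelled samples
k_indices = build_k_indices(y, k_fold=5, seed=1)
print(k_indices.shape)                   # (5, 2): five folds of two indices each
test_idx = k_indices[0]                  # use fold 0 as the test split
train_idx = k_indices[np.arange(5) != 0].ravel()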
def update_fixtures(
request, index, received_output, comment, testname=None, additional_information=None
):
# pylint: disable=too-many-arguments
"""Used by action plugins to generate the fixtures"""
dir_path, file_name = fixture_path_from_request(request, index, testname=testname)
os.makedirs(dir_path, exist_ok=True)
regex = "(/Users|/home).*?/tests/fixtures"
name = re.sub(regex, "/tests/fixtures", request.node.name)
name.replace("docker", "podman")
fixture = {
"name": name,
"index": index,
"comment": comment,
}
if additional_information is not None:
fixture["additional_information"] = additional_information
if additional_information["look_fors"]:
received_output = sanitize_output(received_output)
fixture["output"] = received_output
with open(f"{dir_path}/{file_name}", "w", encoding="utf8") as outfile:
json.dump(fixture, outfile, indent=4, ensure_ascii=False, sort_keys=False)
outfile.write("\n") | 2,959 |
def get_parent(obj, default=_marker):
"""Returns the container the object was traversed via.
Returns None if the object is a containment root.
Raises TypeError if the object doesn't have enough context to get the
parent.
"""
if IRoot.providedBy(obj):
return None
parent = aq_parent(aq_inner(obj))
if parent is not None:
return parent
if default != _marker:
return default
raise TypeError("Not enough context information to get parent", obj) | 2,960 |
def MRR(logits, target):
"""
Compute mean reciprocal rank.
:param logits: 2d array [batch_size x rel_docs_per_query]
:param target: 2d array [batch_size x rel_docs_per_query]
:return: mean reciprocal rank [a float value]
"""
assert logits.shape == target.shape
    indices = numpy.argsort(-logits, 1)
reciprocal_rank = 0
for i in range(indices.shape[0]):
for j in range(indices.shape[1]):
if target[i, indices[i, j]] == 1:
reciprocal_rank += 1.0 / (j + 1)
break
return reciprocal_rank / indices.shape[0] | 2,961 |
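A tiny worked example for MRR; the logits and relevance targets are illustrative:

import numpy

logits = numpy.array([[0.1, 0.9, 0.3],      # relevant doc ranked 1st
                      [0.8, 0.2, 0.4]])     # relevant doc ranked 3rd
target = numpy.array([[0, 1, 0],
                      [0, 1, 0]])
print(MRR(logits, target))                  # (1/1 + 1/3) / 2 = 0.666...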
def resetSpotIndicator():
"""
This function is run as a thread and unsets the spot indicator of GameSense after a given wait time (in seconds).
"""
resetSpotIndicator.terminate = False
deltaSleep = 0.1
epsilon = 0.001
waitTime = 10.0
currentTimeSec = lambda: time.time()
startTime = currentTimeSec()
    while abs(currentTimeSec() - startTime) <= abs(waitTime - epsilon) and not resetSpotIndicator.terminate:
time.sleep(deltaSleep)
steelseries_gamesense.sendSpotEvent(GameSense_Name, 0)
logger.logTrace("thread finished: resetSpotIndicator.") | 2,962 |
def stream_collector(api, args, storage):
"""Pull tweets from stream."""
total_tweets = 0
total_skipped = 0
last_skipped = 0
params = to_dict(args.parameters)
while True:
try:
iterator = api.request(args.endpoint, params).get_iterator()
for item in iterator:
if 'text' in item:
total_tweets += 1
process_tweet(item, args, storage)
elif 'limit' in item:
last_skipped = item['limit']['track']
logging.info('SKIPPED %s tweets' % last_skipped)
elif 'warning' in item:
logging.warning(item['warning'])
elif 'disconnect' in item:
event = item['disconnect']
if event['code'] in [2,5,6,7]:
# streaming connection rejected
raise Exception(event)
logging.info('RE-CONNECTING: %s' % event)
break
except TwitterRequestError as e:
if e.status_code < 500:
raise
except TwitterConnectionError:
pass
finally:
total_skipped += last_skipped
last_skipped = 0
logging.info('Tweet total count = %d, Tweets skipped = %d' % (total_tweets,total_skipped))
logging.info(geocoder_stats()) | 2,963 |
def range(starts,
limits=None,
deltas=1,
dtype=None,
name=None,
row_splits_dtype=dtypes.int64):
"""Returns a `RaggedTensor` containing the specified sequences of numbers.
Each row of the returned `RaggedTensor` contains a single sequence:
```python
ragged.range(starts, limits, deltas)[i] ==
tf.range(starts[i], limits[i], deltas[i])
```
  If `start[i] < limits[i] and deltas[i] < 0`, then `output[i]` will be an
  empty list. Similarly, if `start[i] > limits[i] and deltas[i] > 0`, then
  `output[i]` will be an empty list. This behavior is consistent with the
Python `range` function, but differs from the `tf.range` op, which returns
an error for these cases.
Examples:
>>> tf.ragged.range([3, 5, 2]).to_list()
[[0, 1, 2], [0, 1, 2, 3, 4], [0, 1]]
>>> tf.ragged.range([0, 5, 8], [3, 3, 12]).to_list()
[[0, 1, 2], [], [8, 9, 10, 11]]
>>> tf.ragged.range([0, 5, 8], [3, 3, 12], 2).to_list()
[[0, 2], [], [8, 10]]
The input tensors `starts`, `limits`, and `deltas` may be scalars or vectors.
The vector inputs must all have the same size. Scalar inputs are broadcast
to match the size of the vector inputs.
Args:
starts: Vector or scalar `Tensor`. Specifies the first entry for each range
if `limits` is not `None`; otherwise, specifies the range limits, and the
first entries default to `0`.
limits: Vector or scalar `Tensor`. Specifies the exclusive upper limits for
each range.
deltas: Vector or scalar `Tensor`. Specifies the increment for each range.
Defaults to `1`.
dtype: The type of the elements of the resulting tensor. If not specified,
then a value is chosen based on the other args.
name: A name for the operation.
row_splits_dtype: `dtype` for the returned `RaggedTensor`'s `row_splits`
tensor. One of `tf.int32` or `tf.int64`.
Returns:
A `RaggedTensor` of type `dtype` with `ragged_rank=1`.
"""
row_splits_dtype = dtypes.as_dtype(row_splits_dtype)
if limits is None:
starts, limits = 0, starts
with ops.name_scope(name, 'RaggedRange', [starts, limits, deltas]) as name:
starts = ops.convert_to_tensor(starts, dtype=dtype, name='starts')
limits = ops.convert_to_tensor(limits, dtype=dtype, name='limits')
deltas = ops.convert_to_tensor(deltas, dtype=dtype, name='deltas')
# infer dtype if not explicitly provided
if dtype is None:
starts, limits, deltas = _infer_matching_dtype(
[starts, limits, deltas],
[dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64])
result = gen_ragged_math_ops.ragged_range(
starts, limits, deltas, Tsplits=row_splits_dtype, name=name)
return ragged_tensor.RaggedTensor.from_row_splits(
result.rt_dense_values, result.rt_nested_splits, validate=False) | 2,964 |
def ecef2enuv(u, v, w, lat0, lon0, deg=True):
"""
for VECTOR i.e. between two points
input
-----
    u, v, w [meters]  ECEF vector components of the target
"""
if deg:
lat0 = radians(lat0)
lon0 = radians(lon0)
t = cos(lon0) * u + sin(lon0) * v
uEast = -sin(lon0) * u + cos(lon0) * v
wUp = cos(lat0) * t + sin(lat0) * w
vNorth = -sin(lat0) * t + cos(lat0) * w
return uEast, vNorth, wUp | 2,965 |
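A tiny worked example for ecef2enuv, assuming the module-level radians/sin/cos imports the function relies on; the vector and observer location are illustrative:

from math import radians, sin, cos

# An ECEF x-axis unit vector seen from latitude 0, longitude 90 degrees
east, north, up = ecef2enuv(1.0, 0.0, 0.0, lat0=0.0, lon0=90.0)
print(round(east, 6), round(north, 6), round(up, 6))   # -1.0 0.0 0.0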
def interpolate_ray_dist(ray_dists, order='spline'):
""" interpolate ray distances
:param [float] ray_dists:
:param str order: degree of interpolation
:return [float]:
>>> vals = np.sin(np.linspace(0, 2 * np.pi, 20)) * 10
>>> np.round(vals).astype(int).tolist()
[0, 3, 6, 8, 10, 10, 9, 7, 5, 2, -2, -5, -7, -9, -10, -10, -8, -6, -3, 0]
>>> vals[3:7] = -1
>>> vals[16:] = -1
>>> vals_interp = interpolate_ray_dist(vals, order=3)
>>> np.round(vals_interp).astype(int).tolist()
[0, 3, 6, 9, 10, 10, 8, 7, 5, 2, -2, -5, -7, -9, -10, -10, -10, -8, -4, 1]
>>> vals_interp = interpolate_ray_dist(vals, order='spline')
>>> np.round(vals_interp).astype(int).tolist()
[0, 3, 6, 8, 9, 10, 9, 7, 5, 2, -2, -5, -7, -9, -10, -10, -9, -7, -5, -3]
>>> vals_interp = interpolate_ray_dist(vals, order='cos')
>>> np.round(vals_interp).astype(int).tolist()
[0, 3, 6, 8, 10, 10, 9, 7, 5, 2, -2, -5, -7, -9, -10, -10, -8, -6, -3, 0]
"""
x_space = np.arange(len(ray_dists))
ray_dists = np.array(ray_dists)
missing = ray_dists == -1
x_train = x_space[ray_dists != -1]
x_train_ext = np.hstack((x_train - len(x_space),
x_train,
x_train + len(x_space)))
y_train = ray_dists[ray_dists != -1]
y_train_ext = np.array(y_train.tolist() * 3)
if isinstance(order, int):
# model = pipeline.make_pipeline(preprocessing.PolynomialFeatures(order),
# linear_model.Ridge())
# model.fit(x_space[ray_dists != -1], ray_dists[ray_dists != -1])
# ray_dists[ray_dists == -1] = model.predict(x_space[ray_dists == -1])
z = np.polyfit(x_train, y_train, order)
fn_interp = np.poly1d(z)
ray_dists[missing] = fn_interp(x_space[missing])
elif order == 'spline':
uinterp_us = interpolate.InterpolatedUnivariateSpline(x_train_ext,
y_train_ext)
ray_dists[missing] = uinterp_us(x_space[missing])
elif order == 'cos':
def _fn_cos(x, t):
return x[0] + x[1] * np.sin(x[2] + x[3] * t)
def _fn_cos_residual(x, t, y):
return _fn_cos(x, t) - y
x0 = np.array([np.mean(y_train), (y_train.max() - y_train.min()) / 2.,
0, len(x_space) / np.pi])
lsm_res = optimize.least_squares(_fn_cos_residual, x0, gtol=1e-1,
# loss='soft_l1', f_scale=0.1,
args=(x_train, y_train))
ray_dists[missing] = _fn_cos(lsm_res.x, x_space[missing])
return ray_dists | 2,966 |
def distance(left, right, pairwise=pairwise['prod'], distance_function=None):
"""
Calculate the distance between two *k*-mer profiles.
:arg left, right: Profiles to calculate distance
between.
:return: The distance between `left` and `right`.
:rtype: float
"""
if not distance_function:
return multiset(left, right, pairwise)
return distance_function(left, right) | 2,967 |
def _rec_get_all_imports_exports(fips_dir, proj_dir, result) :
"""recursively get all imported projects, their exported and
imported modules in a dictionary object:
project-1:
url: git-url (not valid for first, top-level project)
exports:
header-dirs: [ ]
conditional-header-dirs:
dir: cmake-if condition string
lib-dirs: [ ]
defines:
def-key: def-val
...
modules :
mod: dir
mod: dir
...
imports:
name:
git: [git-url]
branch: [optional: branch or tag]
cond: [optional: cmake-if condition string conditionally including the dependency]
name:
...
...
...
:param fips_dir: absolute fips directory
:param proj_dir: absolute project directory
:param result: in/out current result
:returns: bool success, and modified result dictionary
"""
success = True
ws_dir = util.get_workspace_dir(fips_dir)
proj_name = util.get_project_name_from_dir(proj_dir)
if proj_name not in result :
imports = get_imports(fips_dir, proj_dir)
exports = get_exports(proj_dir)
for dep_proj_name in imports :
if dep_proj_name not in result :
dep_proj_dir = util.get_project_dir(fips_dir, dep_proj_name)
dep_url = imports[dep_proj_name]['git']
success, result = _rec_get_all_imports_exports(fips_dir, dep_proj_dir, result)
# break recursion on error
if not success :
return success, result
result[proj_name] = {}
result[proj_name]['proj_dir'] = proj_dir
result[proj_name]['imports'] = imports
result[proj_name]['exports'] = exports
# done
return success, result | 2,968 |
def main():
"""Main entry point"""
current_time = datetime.now()
local_time = current_time.astimezone(get_localzone())
if is_day(local_time):
time_string = datetime.strftime(local_time, "%Y%m%d-%H%M%S")
date_string = datetime.strftime(local_time, "%Y%m%d")
picture_directory = PICTURE_ROOT + date_string + "/"
picture_name = time_string + ".jpg"
take_picture(picture_directory, picture_name)
append_pictures_json(picture_directory, picture_name)
append_dates_json(date_string)
else:
#sleep
exit() | 2,969 |
def get_database_cluster(name: Optional[str] = None,
tags: Optional[Sequence[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDatabaseClusterResult:
"""
Provides information on a DigitalOcean database cluster resource.
## Example Usage
```python
import pulumi
import pulumi_digitalocean as digitalocean
example = digitalocean.get_database_cluster(name="example-cluster")
pulumi.export("databaseOutput", example.uri)
```
:param str name: The name of the database cluster.
"""
__args__ = dict()
__args__['name'] = name
__args__['tags'] = tags
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('digitalocean:index/getDatabaseCluster:getDatabaseCluster', __args__, opts=opts, typ=GetDatabaseClusterResult).value
return AwaitableGetDatabaseClusterResult(
database=__ret__.database,
engine=__ret__.engine,
host=__ret__.host,
id=__ret__.id,
maintenance_windows=__ret__.maintenance_windows,
name=__ret__.name,
node_count=__ret__.node_count,
password=__ret__.password,
port=__ret__.port,
private_host=__ret__.private_host,
private_network_uuid=__ret__.private_network_uuid,
private_uri=__ret__.private_uri,
region=__ret__.region,
size=__ret__.size,
tags=__ret__.tags,
uri=__ret__.uri,
urn=__ret__.urn,
user=__ret__.user,
version=__ret__.version) | 2,970 |
def caller_name(skip=2):
"""Get a name of a caller module
`skip` specifies how many levels of stack to skip while getting caller
name. skip=1 means "who calls me", skip=2 "who calls my caller" etc.
An empty string is returned if skipped levels exceed stack height
References:
--------
https://gist.github.com/techtonik/2151727
"""
def stack_(frame):
framelist = []
while frame:
framelist.append(frame)
frame = frame.f_back
return framelist
stack = stack_(sys._getframe(1))
start = 0 + skip
if len(stack) < start + 1:
return ''
parentframe = stack[start]
module = getmodule(parentframe)
if module:
ret_name = module.__name__
else:
ret_name = __name__
return ret_name | 2,971 |
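A quick sanity check for `caller_name`, assuming the function above (and its `sys`/`getmodule` imports) is in scope; with the default `skip=2` it resolves the module two call levels up.

def who_called_me():
    # Default skip=2: report the module of the caller's caller.
    return caller_name()

def wrapper():
    return who_called_me()

print(wrapper())   # -> '__main__' when run as a script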
def get_dataset():
"""Summary
Returns
-------
TYPE
Description
"""
stms = []
for dirpath, dirnames, filenames in os.walk('TEDLIUM_release2'):
for f in filenames:
if f.endswith('stm'):
stms.append(os.path.join(dirpath, f))
data = []
for stm_i in stms:
with open(stm_i, 'r') as fp:
lines = fp.readlines()
for line_i in lines:
sp = line_i.split()
data.append({
'id': sp[0],
'num': sp[1],
'id2': sp[2],
'start_time': sp[3],
'end_time': sp[4],
'ch': 'wideband' if 'f0' in sp[5] else 'telephone',
'sex': 'male' if 'male' in sp[5] else 'female',
'text': " ".join(
sp[6:]) if sp[6] != 'ignore_time_segment_in_scoring' else ''})
    # Keep durations of segments shorter than the 29 s cap (a single pass;
    # only the final cap of the range(30) sweep ever reached the caller).
    max_duration = 29
    durations = []
    for stm_i in stms:
        with open(stm_i, 'r') as fp:
            lines = fp.readlines()
        for line_i in lines:
            sp = line_i.split()
            dur = float(sp[4]) - float(sp[3])
            if dur < max_duration:
                durations.append(dur)
return data, durations | 2,972 |
def tesla_loadhook(h, *args, **kwargs):
"""
Converts a load hook into an application processor.
>>> app = auto_application()
>>> def f(*args, **kwargs): "something done before handling request"
...
>>> app.add_processor(loadhook(f, *args, **kwargs))
"""
def processor(handler):
h(*args, **kwargs)
return handler()
return processor | 2,973 |
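A small sketch of how the returned processor composes with a handler, without the web.py-style `auto_application` from the doctest; `setup` and `handler` are illustrative names.

def setup(resource):
    print("before request: opening", resource)

def handler():
    return "response body"

processor = tesla_loadhook(setup, "db-connection")
print(processor(handler))   # runs setup('db-connection') first, then the handler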
def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'): #pragma: no cover
"""
Force a string to be unicode.
If strings_only is True, don't convert (some) non-string-like objects.
Originally copied from the Django source code, further modifications have
been made.
Original copyright and license:
Copyright (c) Django Software Foundation and individual contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of Django nor the names of its contributors may be used
to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
if strings_only and is_protected_type(s):
return s
    if not isinstance(s, str):
        if isinstance(s, bytes):
            # Note: We use .decode() here, instead of str(s, encoding, errors),
            # so that if s is a SafeString, it ends up as the safe text type.
            s = s.decode(encoding, errors)
        elif hasattr(s, '__unicode__'):
            s = str(s)
        else:
            try:
                s = str(s)
            except UnicodeDecodeError:
                if not isinstance(s, Exception):
                    raise
                # If we get to here, the caller has passed in an Exception
                # subclass populated with non-ASCII data without special
                # handling to display as a string. We need to handle this
                # without raising a further exception. We do an
                # approximation to what the Exception's standard str()
                # output should be.
                s = ' '.join([force_unicode(arg, encoding, strings_only,
                                            errors) for arg in s])
return s | 2,974 |
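A few representative calls, assuming `force_unicode` above is in scope: byte strings are decoded with the given encoding, and non-string objects fall back to `str()`.

print(force_unicode(b"caf\xc3\xa9"))       # -> café (UTF-8 bytes decoded)
print(force_unicode(ValueError("boom")))   # -> boom (falls back to str())
print(force_unicode("already text"))       # returned unchanged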
def extractYoloInfo(yolo_output_format_data):
""" Extract box, objectness, class from yolo output format data """
box = yolo_output_format_data[..., :6]
conf = yolo_output_format_data[..., 6:7]
category = yolo_output_format_data[..., 7:]
return box, conf, category | 2,975 |
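A shape check with a toy NumPy tensor; the last axis is assumed to be laid out as 6 box values, 1 objectness score, then the class scores, matching the slicing above.

import numpy as np

# Batch of 2 predictions, 3 classes: 6 (box) + 1 (objectness) + 3 (classes) = 10 channels.
y = np.random.rand(2, 10).astype(np.float32)
box, conf, category = extractYoloInfo(y)
print(box.shape, conf.shape, category.shape)   # (2, 6) (2, 1) (2, 3)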
def bbox_diou(bboxes1, bboxes2):
"""
    Distance-IoU (DIoU)
@param bboxes1: (a, b, ..., 4)
@param bboxes2: (A, B, ..., 4)
x:X is 1:n or n:n or n:1
@return (max(a,A), max(b,B), ...)
ex) (4,):(3,4) -> (3,)
(2,1,4):(2,3,4) -> (2,3)
"""
bboxes1_area = bboxes1[..., 2] * bboxes1[..., 3]
bboxes2_area = bboxes2[..., 2] * bboxes2[..., 3]
bboxes1_coor = tf.concat(
[
bboxes1[..., :2] - bboxes1[..., 2:] * 0.5,
bboxes1[..., :2] + bboxes1[..., 2:] * 0.5,
],
axis=-1,
)
bboxes2_coor = tf.concat(
[
bboxes2[..., :2] - bboxes2[..., 2:] * 0.5,
bboxes2[..., :2] + bboxes2[..., 2:] * 0.5,
],
axis=-1,
)
left_up = tf.maximum(bboxes1_coor[..., :2], bboxes2_coor[..., :2])
right_down = tf.minimum(bboxes1_coor[..., 2:], bboxes2_coor[..., 2:])
inter_section = tf.maximum(right_down - left_up, 0.0)
inter_area = inter_section[..., 0] * inter_section[..., 1]
union_area = bboxes1_area + bboxes2_area - inter_area
iou = tf.math.divide_no_nan(inter_area, union_area)
enclose_left_up = tf.minimum(bboxes1_coor[..., :2], bboxes2_coor[..., :2])
enclose_right_down = tf.maximum(
bboxes1_coor[..., 2:], bboxes2_coor[..., 2:]
)
enclose_section = enclose_right_down - enclose_left_up
c_2 = enclose_section[..., 0] ** 2 + enclose_section[..., 1] ** 2
center_diagonal = bboxes2[..., :2] - bboxes1[..., :2]
rho_2 = center_diagonal[..., 0] ** 2 + center_diagonal[..., 1] ** 2
diou = iou - tf.math.divide_no_nan(rho_2, c_2)
return diou | 2,976 |
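A quick numeric check, assuming TensorFlow 2.x eager execution: identical boxes give a DIoU of 1, and shifting one box lowers the score.

import tensorflow as tf

b1 = tf.constant([[5.0, 5.0, 4.0, 4.0]])   # (cx, cy, w, h)
b2 = tf.constant([[5.0, 5.0, 4.0, 4.0]])
b3 = tf.constant([[7.0, 5.0, 4.0, 4.0]])
print(bbox_diou(b1, b2).numpy())   # [1.0]
print(bbox_diou(b1, b3).numpy())   # < 1: lower IoU plus a centre-distance penalty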
def showp1rev(context, mapping):
"""Integer. The repository-local revision number of the changeset's
first parent, or -1 if the changeset has no parents. (DEPRECATED)"""
ctx = context.resource(mapping, b'ctx')
return ctx.p1().rev() | 2,977 |
def read_object(ctx, pin, object_id):
"""
Read arbitrary PIV object.
Read PIV object by providing the object id.
\b
OBJECT-ID Id of PIV object in HEX.
"""
controller = ctx.obj['controller']
def do_read_object(retry=True):
try:
click.echo(controller.get_data(object_id))
except APDUError as e:
if e.sw == SW.NOT_FOUND:
ctx.fail('No data found.')
elif e.sw == SW.SECURITY_CONDITION_NOT_SATISFIED:
_verify_pin(ctx, controller, pin)
do_read_object(retry=False)
else:
raise
do_read_object() | 2,978 |
def erp_pretax(t,ma,st,ra,par):
""" early retirement pension (efterløn) pretax"""
# initialize
ERP = np.zeros(1)
# pre two year period
if par.T_erp <= t < par.T_two_year:
if ra == 1:
priv = priv_pension(ma,st,par)
ERP[:] = np.maximum(0,par.ERP_high - 0.6*0.05*np.maximum(0, priv - par.ERP_low))
# two year period
elif par.T_two_year <= t < par.T_oap:
# two year rule is satisfied
if ra == 0:
ERP[:] = par.ERP_2
# two year rule not satisfied
elif ra == 1:
priv = priv_pension(ma,st,par)
ERP[:] = np.maximum(0,par.ERP_high - 0.6*0.05*np.maximum(0, priv - par.ERP_low))
# return
return ERP | 2,979 |
def main(gen5tt_algo, fs_file, num_tracts, participant_label, session_label, t1_file, eddy_file, bvec_file, bval_file, template_file, atlas_file, output_dir):
"""Console script for tractify."""
work_dir = os.path.join(output_dir, "scratch")
# Set parameters based on CLI, pass through object
parameters = Parameters(
t1_file=t1_file,
fs_file=fs_file,
eddy_file=eddy_file,
bval_file=bval_file,
bvec_file=bvec_file,
work_dir=work_dir,
output_dir=output_dir,
template_file=template_file,
atlas_file=atlas_file,
gen5tt_algo=gen5tt_algo,
num_tracts=num_tracts
)
if (gen5tt_algo == 'freesurfer'):
try:
os.environ["SUBJECTS_DIR"]
        except KeyError:
print("No SUBJECTS_DIR environment variable found for"
" freesurfer, using '" + os.path.dirname(fs_file) + "' instead")
os.environ["SUBJECTS_DIR"] = os.path.dirname(fs_file)
wf = init_single_ses_wf(participant_label, session_label, parameters)
wf.base_dir = parameters.work_dir
wf.write_graph(graph2use="colored")
wf.config["execution"]["remove_unnecessary_outputs"] = False
wf.config["execution"]["keep_inputs"] = True
wf.run()
# Output the sse file to a text output
# Get string of sse output value
sse_node = next(node.replace('.', '/') for node in wf.list_node_names() if 'dtifit' in node)
    # Get the path of the dtifit SSE image inside the scratch directory
subject_session_base = 'single_subject_' + participant_label + '_wf'
sse_file = os.path.join(work_dir, subject_session_base, sse_node, 'dtifit__sse.nii.gz')
# If the sse was generated
if (os.path.isfile(sse_file)):
sse_txt_base = 'sub_' + participant_label + '_ses_' + session_label + '_sse.txt'
sse_txt_scratch = os.path.join(work_dir, subject_session_base, sse_node, sse_txt_base)
# Run the fslstats command on the sse and redirect it to a text output
sse_dtifit_value_command = ['fslstats' , sse_file, '-M']
my_env = os.environ.copy()
my_env["PATH"] = "/usr/sbin:/sbin:" + my_env["PATH"]
sse_txt_file = open(sse_txt_scratch, "w")
subprocess.call(sse_dtifit_value_command, stdout=sse_txt_file)
sse_txt_file.close()
print('Output sse text value is in ' + sse_txt_scratch)
else:
print("SSE wasn't generated, will not output merged text value")
return 0 | 2,980 |
def add_missing_cmd(command_list):
"""Adds missing cmd tags to the given command list."""
# E.g.: given:
# ['a', '0', '0', '0', '0', '0', '0', '0',
# '0', '0', '0', '0', '0', '0', '0']
# Converts to:
# [['a', '0', '0', '0', '0', '0', '0', '0'],
# ['a', '0', '0', '0', '0', '0', '0', '0']]
# And returns a string that joins these elements with spaces.
cmd_tag = command_list[0]
args = command_list[1:]
final_cmds = []
for arg_batch in grouper(args, NUM_ARGS[cmd_tag]):
final_cmds.append([cmd_tag] + list(arg_batch))
if not final_cmds:
# command has no args (e.g.: 'z')
final_cmds = [[cmd_tag]]
return final_cmds | 2,981 |
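The snippet relies on a `grouper` helper and a `NUM_ARGS` table that are not shown; the stand-ins below are assumptions reconstructed from the docstring (SVG path commands and their argument counts) purely to make the example runnable.

from itertools import zip_longest

NUM_ARGS = {'a': 7, 'c': 6, 'l': 2, 'm': 2, 'z': 0}   # hypothetical table

def grouper(iterable, n, fillvalue=None):
    args = [iter(iterable)] * n
    return zip_longest(*args, fillvalue=fillvalue)

print(add_missing_cmd(['a'] + ['0'] * 14))
# -> [['a', '0', '0', '0', '0', '0', '0', '0'], ['a', '0', '0', '0', '0', '0', '0', '0']]
print(add_missing_cmd(['z']))
# -> [['z']]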
def replace_umlauts(s: str) -> str:
"""
Replace special symbols with the letters with umlauts (ä, ö and ü)
:param s: string with the special symbols (::)
:return: edited string
"""
out = s.replace('A::', 'Ä').replace('O::', 'Ö').replace('U::', 'Ü').replace('a::', 'ä').replace('o::', 'ö') \
.replace('u::', 'ü')
return out | 2,982 |
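A one-line check of the substitution:

print(replace_umlauts("U::ber die Bru::cke"))   # -> 'Über die Brücke'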
def bandstructure_flow(workdir, scf_input, nscf_input, dos_inputs=None, manager=None, flow_class=Flow, allocate=True):
"""
Build a :class:`Flow` for band structure calculations.
Args:
workdir: Working directory.
scf_input: Input for the GS SCF run.
nscf_input: Input for the NSCF run (band structure run).
dos_inputs: Input(s) for the NSCF run (dos run).
manager: :class:`TaskManager` object used to submit the jobs
Initialized from manager.yml if manager is None.
flow_class: Flow subclass
allocate: True if the flow should be allocated before returning.
Returns:
:class:`Flow` object
"""
flow = flow_class(workdir, manager=manager)
work = BandStructureWork(scf_input, nscf_input, dos_inputs=dos_inputs)
flow.register_work(work)
# Handy aliases
flow.scf_task, flow.nscf_task, flow.dos_tasks = work.scf_task, work.nscf_task, work.dos_tasks
if allocate: flow.allocate()
return flow | 2,983 |
def beautify_ax(ax, edge_color, face_color):
"""
Beautifies an ax object by adjusting axis and ticks and changing colors.
:param ax:
:return:
"""
# set ticks only on the left and the bottom
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
# change ticks length and width
ax.xaxis.set_tick_params(direction='in', length=3, width=0.5,)
ax.yaxis.set_tick_params(direction='in', length=3, width=0.5)
# change axis spines width and color
ax.spines['left'].set_linewidth(0.5)
ax.spines['bottom'].set_linewidth(0.5)
ax.spines['left'].set_color(edge_color)
ax.spines['bottom'].set_color(edge_color)
# set patch face color
ax.patch.set_facecolor(face_color)
# change ticks and labels color
ax.tick_params(axis='both', colors=edge_color, which='both') | 2,984 |
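Typical use on a freshly created matplotlib figure (the colors here are arbitrary):

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2, 3], [0, 1, 4, 9])
beautify_ax(ax, edge_color="0.3", face_color="0.95")   # grey spines/ticks on a light panel
fig.savefig("styled_axes.png", dpi=150)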
def get_modules():
"""Returns the list of module names
"""
def listdir(dir):
def clean(name):
name = os.path.basename(name)
if name[-4:] == '.zip':
name = name[:-4]
return name
def is_really_module(name):
for mname in MANIFEST_NAMES:
if os.path.isfile(opj(dir, name, mname)):
return True
return map(clean, filter(is_really_module, os.listdir(dir)))
plist = []
initialize_sys_path()
for ad in ad_paths:
plist.extend(listdir(ad))
return list(set(plist)) | 2,985 |
def resnet_50(num_classes, data_format='channels_first', pruning_method=None):
"""Returns the ResNet model for a given size and number of output classes."""
return resnet_50_generator(
block_fn=bottleneck_block_,
lst_layers=[3, 4, 6, 3],
num_classes=num_classes,
pruning_method=pruning_method,
data_format=data_format) | 2,986 |
def read_amuselabs_data(s):
"""
Read in an amuselabs string, return a dictionary of data
"""
# Data might be base64'd or not
try:
data = json.loads(s)
except json.JSONDecodeError:
s1 = base64.b64decode(s)
data = json.loads(s1)
ret = {}
# metadata
# technically these can be codewords but i've never seen one
kind = "crossword"
width, height = data['w'], data['h']
ret['metadata'] = {
'width': width
, 'height': height
, 'kind': kind
, 'author': data.get('author')
, 'title': data.get('title')
, 'copyright': data.get('copyright')
, 'noClueCells': True
# no notepad?
}
# grid
grid = []
box = data['box']
cellInfos = data.get('cellInfos', [])
# Reshape cellInfos to make lookup easier
markup = {}
for c in cellInfos:
markup[(c['x'], c['y'])] = c
for y in range(height):
for x in range(width):
cell = {'x': x, 'y': y, 'value': None}
if box[x][y] == '\x00':
cell['isBlock'] = True
else:
cell['solution'] = box[x][y]
style = {}
if markup.get((x, y)):
thisMarkup = markup[(x, y)]
if thisMarkup.get('isCircled'):
style['shapebg'] = 'circle'
if thisMarkup.get('isVoid'):
cell['isBlock'] = False
cell['isVoid'] = True
bar_string = ''
for letter, side in {'B': 'bottom', 'R': 'right'}.items():
if thisMarkup.get(f'{side}Wall'):
bar_string += letter
if bar_string:
style['barred'] = bar_string
cell['style'] = style
grid.append(cell)
ret['grid'] = grid
# clues
placed_words = data['placedWords']
across_words = [word for word in placed_words if word['acrossNotDown']]
down_words = [word for word in placed_words if not word['acrossNotDown']]
# sorting is probably unnecessary
across_words = sorted(across_words, key=lambda x: (x['y'], x['x']))
down_words = sorted(down_words, key=lambda x: (x['y'], x['x']))
across_clues = [{'number': str(x['clueNum']), 'clue': x['clue']['clue']} for x in across_words]
down_clues = [{'number': str(x['clueNum']), 'clue': x['clue']['clue']} for x in down_words]
ret['clues'] = [{'title': 'Across', 'clues': across_clues}, {'title': 'Down', 'clues': down_clues}]
return ret | 2,987 |
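A hand-built 2x1 blob that exercises the parser end to end; the field names follow what the function reads above, not an official AmuseLabs schema.

import json

blob = json.dumps({
    "w": 2, "h": 1,
    "box": [["C"], ["A"]],   # indexed box[x][y]
    "placedWords": [{
        "acrossNotDown": True, "x": 0, "y": 0, "clueNum": 1,
        "clue": {"clue": "Certificate authority, briefly"},
    }],
    "author": "n/a", "title": "Tiny", "copyright": "n/a",
})
puzzle = read_amuselabs_data(blob)
print(puzzle["metadata"]["width"], puzzle["metadata"]["height"])   # 2 1
print([c["solution"] for c in puzzle["grid"]])                     # ['C', 'A']
print(puzzle["clues"][0])                                          # the Across clue list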
def calinski_harabasz(dataset_values:DatasetValues):
"""Calinski, T.; Harabasz, J. (1974). A dendrite method for cluster analysis.
Communications in Statistics - Theory and Methods, v.3, n.1, p.1�27.
The objective is maximize value [0, +Inf]"""
if dataset_values.K == 1:
return 0
return calinski_harabasz_score(dataset_values.data, dataset_values.cluster_labels) | 2,988 |
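`DatasetValues` is not shown in the snippet, so the example fakes it with a SimpleNamespace exposing the three attributes the function reads (`K`, `data`, `cluster_labels`); it also assumes `calinski_harabasz_score` comes from scikit-learn.

from types import SimpleNamespace
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=150, centers=3, random_state=0)
labels = KMeans(n_clusters=3, n_init=10, random_state=0).fit_predict(X)
dv = SimpleNamespace(K=3, data=X, cluster_labels=labels)
print(calinski_harabasz(dv))   # large value -> well-separated clusters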
def parse_version(version):
"""
input version string of the form:
'Major.Minor.Patch+CommitHash'
like:
'0.1.5+95ffef4'
------ or ------
'0.1.0'
returns version_info tuple of the form:
(major,minor,patch,hash)
like:
(0, 1, 5, '95ffef4')
-------- or --------
(0, 1, 0, '')
"""
    matches = match(
        r'(?P<major>[0-9]+)\.(?P<minor>[0-9]+)\.(?P<patch>[0-9]+)([+g](?P<hash>[a-z0-9]*))?',
        version,
        IGNORECASE
    )
if matches:
major = int(matches.group('major'))
minor = int(matches.group('minor'))
patch = int(matches.group('patch'))
hash = matches.group('hash') or ''
return (major,minor,patch,hash)
else:
raise ValueError("Version string, '%s' could not be parsed. It should be of the form: 'Major.Minor.Patch+CommitHash'." % version) | 2,989 |
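Round-tripping the two docstring examples, assuming `match` and `IGNORECASE` are imported from `re` as the snippet implies:

print(parse_version("0.1.5+95ffef4"))   # -> (0, 1, 5, '95ffef4')
print(parse_version("0.1.0"))           # -> (0, 1, 0, '')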
def _location_sensitive_score(W_query, W_fil, W_keys):
"""Impelements Bahdanau-style (cumulative) scoring function.
    This attention is described in:
    J. K. Chorowski, D. Bahdanau, D. Serdyuk, K. Cho, and Y. Bengio,
    “Attention-based models for speech recognition,” in Advances in Neural
    Information Processing Systems, 2015, pp. 577–585.
#############################################################################
hybrid attention (content-based + location-based)
f = F * α_{i-1}
energy = dot(v_a, tanh(W_keys(h_enc) + W_query(h_dec) + W_fil(f) + b_a))
#############################################################################
Args:
W_query: Tensor, shape '[batch_size, 1, attention_dim]' to compare to location features.
        W_fil: previous alignments processed into location features, shape '[batch_size, max_time, attention_dim]'
W_keys: Tensor, shape '[batch_size, max_time, attention_dim]', typically the encoder outputs.
Returns:
A '[batch_size, max_time]' attention score (energy)
"""
# Get the number of hidden units from the trailing dimension of keys
dtype = W_query.dtype
num_units = W_keys.shape[-1].value or array_ops.shape(W_keys)[-1]
v_a = tf.get_variable(
"attention_variable_projection",
shape=[num_units],
dtype=dtype,
initializer=tf.contrib.layers.xavier_initializer(),
trainable=True,
)
print(v_a)
b_a = tf.get_variable(
"attention_bias",
shape=[num_units],
dtype=dtype,
initializer=tf.zeros_initializer(),
)
return tf.reduce_sum(v_a * tf.tanh(W_keys + W_query + W_fil + b_a), [2]) | 2,990 |
def get_streamdecks():
"""
Retrieves all connected streamdecks
"""
streamdecks = DeviceManager().enumerate()
return streamdecks | 2,991 |
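A small enumeration loop; `open`, `deck_type`, `key_count` and `close` are standard python-elgato-streamdeck device methods, noted as an assumption since the snippet only shows enumeration.

decks = get_streamdecks()
print(f"Found {len(decks)} Stream Deck(s)")
for deck in decks:
    deck.open()
    try:
        print(deck.deck_type(), "with", deck.key_count(), "keys")
    finally:
        deck.close()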
def clean_string(text):
"""
Remove Lucene reserved characters from query string
"""
if isinstance(text, six.string_types):
return text.translate(UNI_SPECIAL_CHARS).strip()
return text.translate(None, STR_SPECIAL_CHARS).strip() | 2,992 |
def convert_single_example(example_index, example, label_size, max_seq_length,
tokenizer, max_qa_length):
"""Loads a data file into a list of `InputBatch`s."""
# RACE is a multiple choice task. To perform this task using AlBERT,
# we will use the formatting proposed in "Improving Language
# Understanding by Generative Pre-Training" and suggested by
# @jacobdevlin-google in this issue
# https://github.com/google-research/bert/issues/38.
#
# Each choice will correspond to a sample on which we run the
# inference. For a given RACE example, we will create the 4
# following inputs:
# - [CLS] context [SEP] choice_1 [SEP]
# - [CLS] context [SEP] choice_2 [SEP]
# - [CLS] context [SEP] choice_3 [SEP]
# - [CLS] context [SEP] choice_4 [SEP]
# The model will output a single value for each input. To get the
# final decision of the model, we will run a softmax over these 4
# outputs.
if isinstance(example, classifier_utils.PaddingInputExample):
return classifier_utils.InputFeatures(
example_id=0,
input_ids=[[0] * max_seq_length] * label_size,
input_mask=[[0] * max_seq_length] * label_size,
segment_ids=[[0] * max_seq_length] * label_size,
label_id=0,
is_real_example=False)
else:
context_tokens = tokenizer.tokenize(example.context_sentence)
if example.start_ending is not None:
start_ending_tokens = tokenizer.tokenize(example.start_ending)
all_input_tokens = []
all_input_ids = []
all_input_mask = []
all_segment_ids = []
for ending in example.endings:
# We create a copy of the context tokens in order to be
# able to shrink it according to ending_tokens
context_tokens_choice = context_tokens[:]
if example.start_ending is not None:
ending_tokens = start_ending_tokens + tokenizer.tokenize(ending)
else:
ending_tokens = tokenizer.tokenize(ending)
# Modifies `context_tokens_choice` and `ending_tokens` in
# place so that the total length is less than the
# specified length. Account for [CLS], [SEP], [SEP] with
# "- 3"
ending_tokens = ending_tokens[- max_qa_length:]
if len(context_tokens_choice) + len(ending_tokens) > max_seq_length - 3:
context_tokens_choice = context_tokens_choice[: (
max_seq_length - 3 - len(ending_tokens))]
tokens = ["[CLS]"] + context_tokens_choice + (
["[SEP]"] + ending_tokens + ["[SEP]"])
segment_ids = [0] * (len(context_tokens_choice) + 2) + [1] * (
len(ending_tokens) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
all_input_tokens.append(tokens)
all_input_ids.append(input_ids)
all_input_mask.append(input_mask)
all_segment_ids.append(segment_ids)
label = example.label
if example_index < 5:
tf.logging.info("*** Example ***")
tf.logging.info("id: {}".format(example.example_id))
for choice_idx, (tokens, input_ids, input_mask, segment_ids) in \
enumerate(zip(all_input_tokens, all_input_ids, all_input_mask, all_segment_ids)):
tf.logging.info("choice: {}".format(choice_idx))
tf.logging.info("tokens: {}".format(" ".join(tokens)))
tf.logging.info(
"input_ids: {}".format(" ".join(map(str, input_ids))))
tf.logging.info(
"input_mask: {}".format(" ".join(map(str, input_mask))))
tf.logging.info(
"segment_ids: {}".format(" ".join(map(str, segment_ids))))
tf.logging.info("label: {}".format(label))
return classifier_utils.InputFeatures(
example_id=example.example_id,
input_ids=all_input_ids,
input_mask=all_input_mask,
segment_ids=all_segment_ids,
label_id=label
) | 2,993 |
def GetAllProperties(layers='core'):
"""Return all properties in the graph."""
global Utc
KEY = "AllProperties:%s" % layers
if DataCache.get(KEY,Utc):
#logging.debug("DataCache HIT: %s" % KEY)
return DataCache.get(KEY,Utc)
else:
#logging.debug("DataCache MISS: %s" % KEY)
mynode = Unit.GetUnit("Thing")
props = GetSources(Unit.GetUnit("rdf:type", True), Unit.GetUnit("rdf:Property", True), layers=EVERYLAYER)
res = []
for prop in props:
if inLayer(layers,prop):
res.append(prop)
sorted_all_properties = sorted(res, key=lambda u: u.id)
DataCache.put(KEY,sorted_all_properties,Utc)
return sorted_all_properties | 2,994 |
def _single_optimal_block(x: NDArray) -> Tuple[float, float]:
"""
Compute the optimal window length for a single series
Parameters
----------
x : ndarray
The data to use in the optimal window estimation
Returns
-------
stationary : float
Estimated optimal window length for stationary bootstrap
circular : float
Estimated optimal window length for circular bootstrap
"""
nobs = x.shape[0]
eps = x - x.mean(0)
b_max = np.ceil(min(3 * np.sqrt(nobs), nobs / 3))
kn = max(5, int(np.log10(nobs)))
m_max = int(np.ceil(np.sqrt(nobs))) + kn
# Find first collection of kn autocorrelations that are insignificant
cv = 2 * np.sqrt(np.log10(nobs) / nobs)
acv = np.zeros(m_max + 1)
abs_acorr = np.zeros(m_max + 1)
opt_m: Optional[int] = None
for i in range(m_max + 1):
v1 = eps[i + 1 :] @ eps[i + 1 :]
v2 = eps[: -(i + 1)] @ eps[: -(i + 1)]
cross_prod = eps[i:] @ eps[: nobs - i]
acv[i] = cross_prod / nobs
abs_acorr[i] = np.abs(cross_prod) / np.sqrt(v1 * v2)
if i >= kn:
if np.all(abs_acorr[i - kn : i] < cv) and opt_m is None:
opt_m = i - kn
m = 2 * max(opt_m, 1) if opt_m is not None else m_max
m = min(m, m_max)
g = 0.0
lr_acv = acv[0]
for k in range(1, m + 1):
lam = 1 if k / m <= 1 / 2 else 2 * (1 - k / m)
g += 2 * lam * k * acv[k]
lr_acv += 2 * lam * acv[k]
d_sb = 2 * lr_acv ** 2
d_cb = 4 / 3 * lr_acv ** 2
b_sb = ((2 * g ** 2) / d_sb) ** (1 / 3) * nobs ** (1 / 3)
b_cb = ((2 * g ** 2) / d_cb) ** (1 / 3) * nobs ** (1 / 3)
b_sb = min(b_sb, b_max)
b_cb = min(b_cb, b_max)
return b_sb, b_cb | 2,995 |
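A quick check on a simulated AR(1) series, assuming the snippet's module-level imports (numpy and the typing aliases) are available; positive autocorrelation should push both estimated block lengths well above 1.

import numpy as np

rng = np.random.default_rng(0)
x = np.empty(1000)
x[0] = rng.standard_normal()
for t in range(1, 1000):
    x[t] = 0.5 * x[t - 1] + rng.standard_normal()

b_stationary, b_circular = _single_optimal_block(x)
print(round(b_stationary, 2), round(b_circular, 2))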
def predict(params, X):
"""
    Using the learned parameters, predicts a class for each example in X.

    Arguments:
    params -- python dictionary containing the learned parameters
    X -- input data of size (n_x, m)

    Returns:
    predictions -- vector of predictions of our model (red: 0 / blue: 1)
    """
# Computes probabilities using forward propagation, and classifies to 0/1 using 0.5 as the threshold.
A2, cache = forward_propagation(X, params)
predictions = np.round(A2)
return predictions | 2,996 |
def RedirectFilterPageGenerator(generator):
"""
Wraps around another generator. Yields only those pages that are not redirects.
"""
for page in generator:
if not page.isRedirectPage():
yield page | 2,997 |
def response(request):
"""
    Return a JSON response object.

    :param request: the incoming HttpRequest
    :return: an HttpResponse carrying a JSON body
    """
    json_str = '{"name": "张三", "age": 18}'  # the whole payload is one JSON string
response = HttpResponse(json_str,
content_type="application/json",
status=200)
response["dev"] = "aGrass0825" # 向响应头中添加内容
return response | 2,998 |
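A hypothetical URLconf entry wiring the view up; the module layout and route name are illustrative, only the `HttpResponse` usage comes from the snippet.

# urls.py (sketch)
from django.urls import path
from . import views   # the module containing response()

urlpatterns = [
    path("person/", views.response, name="person-json"),
]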
def nest_to_flat_dict(nest):
"""Convert a nested structure into a flat dictionary.
Args:
nest: A nested structure.
Returns:
flat_dict: A dictionary with strings keys that can be converted back into
the original structure via `flat_dict_to_nest`.
"""
flat_sequence = tf.nest.flatten(nest)
return {str(k): v for k, v in enumerate(flat_sequence)} | 2,999 |
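A tiny illustration, assuming TensorFlow is importable; `tf.nest.flatten` walks dict values in sorted-key order, so the keys of the flat dict are just the flatten indices.

import tensorflow as tf

nest = {"a": 1, "b": [2, 3]}
flat = nest_to_flat_dict(nest)
print(flat)   # {'0': 1, '1': 2, '2': 3}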