content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M)
---|---|
def init_validator(required, cls, *additional_validators):
"""
Create an attrs validator based on the cls provided and required setting.
:param bool required: whether the field is required in a given model.
    :param cls: the expected class type of object value.
    :param additional_validators: extra validators to chain in front of the type check.
    :return: attrs validator chained correctly (e.g. optional(instance_of))
"""
validator = validators.instance_of(cls)
if additional_validators:
additional_validators = list(additional_validators)
additional_validators.append(validator)
validator = composite(*additional_validators)
return validator if required else validators.optional(validator) | 4,700 |
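A minimal usage sketch for the helper above, assuming it lives in a module that also defines `composite` and is used with the attrs package; the `Order` class and its fields are illustrative, not from the original.
import attr

@attr.s
class Order:
    # required int field: validators.instance_of(int) is applied directly
    quantity = attr.ib(validator=init_validator(True, int))
    # optional str field: the validator is wrapped in validators.optional(...)
    note = attr.ib(default=None, validator=init_validator(False, str))

Order(quantity=3)             # ok
Order(quantity=3, note=None)  # ok, the optional field accepts None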
def test_network_switch_forwards_packets_received_from_network_interfaces():
"""Validate packet with destination matching one interface is routed up.
We send three packets: one from 'eth', one from 'wifi' and one from 'user'.
Make sure that in any case the packet is routed to user.
"""
sim, ns, wifi, eth = Mock(), Mock(), Mock(), Mock()
switch = NetworkSwitch(sim)
eth.address = 1
eth_rev_conn = Mock()
eth.connections.set = Mock(return_value=eth_rev_conn)
wifi.address = 20
wifi_rev_conn = Mock()
wifi.connections.set = Mock(return_value=wifi_rev_conn)
switch.connections.set('user', ns, reverse=False)
switch.connections.set('eth', eth, rname='network')
wifi_conn = switch.connections.set('wifi', wifi, rname='network')
switch.table.add(10, connection='eth', next_hop=2)
switch.table.add(30, connection='wifi', next_hop=23)
pkt_1 = NetworkPacket(destination_address=10, originator_address=5, osn=8)
pkt_2 = NetworkPacket(destination_address=30, originator_address=17, osn=4)
switch.handle_message(pkt_1, connection=wifi, sender=wifi_conn)
sim.schedule.assert_called_once_with(
0, eth.handle_message, args=(pkt_1,), kwargs={
'connection': eth_rev_conn, 'sender': switch,
}
)
sim.schedule.reset_mock()
switch.handle_message(pkt_2, connection=wifi, sender=wifi_conn)
sim.schedule.assert_called_once_with(
0, wifi.handle_message, args=(pkt_2,), kwargs={
'connection': wifi_rev_conn, 'sender': switch,
}
) | 4,701 |
def upload_results(target_folder, local_folder_path):
"""
Uploads results folder containing the bam file (and associated output)
    :param target_folder: S3 path to upload the alignment results to
:param local_folder_path: local path containing the alignment results
"""
upload_folder(target_folder, local_folder_path) | 4,702 |
def init_app(app):
"""添加日志记录器"""
app.logger.addHandler(_file_handler()) | 4,703 |
def next_search(request, *args, **kwargs):
"""
Handle search requests
:param request:
:return:
"""
server = FhirServerUrl()
in_fmt = "json"
get_fmt = get_format(request.GET)
if settings.DEBUG:
print("Server:", server)
print("Kwargs:",kwargs)
context = {'display':"Search",
'name': "Search",
'server': server,
'in_fmt': in_fmt,
'get_fmt': get_fmt,
'template': 'v1api/search.html',
}
request_string = "?"
for item in request.GET:
request_string += item +"=" + request.GET[item] +"&"
if request_string[:0] =="&":
request_string = request_string[:-1]
if not "patient=Patient/" in request_string:
try:
xwalk = Crosswalk.objects.get(user=request.user)
patient_id = xwalk.fhir_url_id
request_string += "&patient=Patient/"+patient_id
except Crosswalk.DoesNotExist:
return kickout_404("ID for this user not found:%s" % request.user)
if settings.DEBUG:
print("Gets:", request_string)
try:
r = requests.get(server+request_string)
context = process_page(request, r, context)
return publish_page(request, context)
except requests.ConnectionError:
print("Whoops - Problem connecting to FHIR Server")
messages.error(request,
"FHIR Server is unreachable. "
"Are you on the CMS Network?")
return render_to_response(context['template'],
RequestContext(request, context, )) | 4,704 |
def ShiftRight(x, **unused_kwargs):
"""Layer to shift the tensor to the right by padding on axis 1."""
if not isinstance(x, (list, tuple)): # non-chunked inputs
pad_widths = [(0, 0)] * len(x.shape)
pad_widths[1] = (1, 0) # Padding on axis=1
padded = np.pad(x, pad_widths, mode='constant')
return padded[:, :-1]
# Handling chunked inputs. Recall that the list of chunks represents a big
# sequence (the concatenation of the chunks). We want to shift that sequence,
# so we put a 0 in the beginning of the first chunk and the last element of
# that chunk is used as the new first element of the next chunk, and so on.
padded = []
last_value = np.zeros_like(x[0][:, -1])
for chunk in x:
padded_chunk = np.concatenate([last_value[:, np.newaxis], chunk], axis=1)
last_value = chunk[:, -1]
padded.append(padded_chunk[:, :-1])
return padded | 4,705 |
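A quick check of the non-chunked path above, assuming `np` is plain NumPy (in the original it may be bound to a backend-specific numpy).
import numpy as np

x = np.array([[1, 2, 3],
              [4, 5, 6]])
# pads a zero column on the left of axis 1 and drops the last column:
# [[0 1 2]
#  [0 4 5]]
print(ShiftRight(x))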
def calc_weights():
"""
Initialize focus weights matrix following parabolic equation.
Used to give higher priority to pixels located closer to the center of the ROI.
"""
global focusWeights
focusWeights = np.empty((looseBound[3], looseBound[2]), np.float32)
# weight = a * sqr(x - b) + c
b = looseBound[2] / 2.0
a = -1.0 / (b * b)
c = 1.0
curr_right = looseBound[2] - 1
curr_bottom = looseBound[3] - 1
for offset in range(0, (looseBound[2] + 1) // 2):
val = a * ((offset - b) ** 2) + c
cv2.rectangle(focusWeights, (offset, offset), (curr_right, curr_bottom), val)
curr_right -= 1
curr_bottom -= 1 | 4,706 |
def connect_kafka_producer():
"""Return a MSK client to publish the streaming messages."""
# Use a global variable so Lambda can reuse the persisted client on future invocations
global kafka_client
if kafka_client is None:
logger.debug('Creating new Kafka client.')
try:
kafka_client = KafkaProducer(bootstrap_servers=os.environ['MSK_BOOTSTRAP_SRV'])
except Exception as ex:
logger.error('Failed to create new Kafka client: {}'.format(ex))
send_sns_alert(str(ex))
raise
return kafka_client | 4,707 |
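A sketch of how the cached producer might be used from a Lambda handler; the handler name, topic, and event shape are assumptions, and MSK_BOOTSTRAP_SRV must be set in the function environment.
def lambda_handler(event, context):
    producer = connect_kafka_producer()       # reused across warm invocations
    records = event.get('Records', [])        # assumed SQS/Kinesis-style event
    for record in records:
        producer.send('clickstream', value=record['body'].encode('utf-8'))
    producer.flush()
    return {'published': len(records)}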
def empty_record():
"""Create an empty record."""
record = dump_empty(Marc21RecordSchema)
record["metadata"] = "<record> <leader>00000nam a2200000zca4500</leader></record>"
record["is_published"] = False
record["files"] = {"enabled": True}
return record | 4,708 |
def illuminanceToPhotonPixelRate(illuminance,
objective_numerical_aperture=1.0,
illumination_wavelength=0.55e-6,
camera_pixel_size=6.5e-6,
objective_magnification=1,
system_magnification=1,
sample_quantum_yield=1.,
**kwargs):
"""
Function which converts source illuminance and microscope parameters to
photons / px / s.
Based heavily on the publication:
"When Does Computational Imaging Improve Performance?,"
O. Cossairt, M. Gupta and S.K. Nayar,
IEEE Transactions on Image Processing,
Vol. 22, No. 2, pp. 447–458, Aug. 2012.
However, this function implements the same result for
microscopy, replacing f/# with NA, removing reflectance,
and including magnification.
    Args:
        illuminance: Photometric source illuminance, lux
        objective_numerical_aperture: Objective numerical aperture
        illumination_wavelength: Illumination wavelength, m
        camera_pixel_size: Pixel size of the detector, m
        objective_magnification: Objective magnification
        system_magnification: Additional system magnification
        sample_quantum_yield: Quantum yield of the sample
    Returns:
        Photon rate at the camera, photons / pixel / s.
"""
    # Conversion factor from radiometric to photometric coordinates
# https://www.thorlabs.de/catalogPages/506.pdf
K = 1 / 680
    # Reduced Planck constant (the unreduced Planck constant would be 6.626176e-34)
    h_bar = 1.054572e-34
# Speed of light
c = 2.9979e8
# Constant term
const = K * illumination_wavelength / h_bar / c
# Calculate photon_pixel_rate
photon_pixel_rate = sample_quantum_yield * const * (objective_numerical_aperture ** 2) * illuminance * (camera_pixel_size / (system_magnification * objective_magnification)) ** 2
# Return
return photon_pixel_rate | 4,709 |
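A worked example with assumed values (roughly office-level lighting through a low-NA, 10x objective); the numbers are illustrative only.
rate = illuminanceToPhotonPixelRate(
    illuminance=500,                    # lux
    objective_numerical_aperture=0.25,
    illumination_wavelength=0.55e-6,    # m
    camera_pixel_size=6.5e-6,           # m
    objective_magnification=10,
    system_magnification=1,
)
print('photon rate: %.3g photons / px / s' % rate)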
def stop_tuning(step):
""" stop tuning the current step method """
if hasattr(step, 'tune'):
step.tune = False
elif hasattr(step, 'methods'):
step.methods = [stop_tuning(s) for s in step.methods]
return step | 4,710 |
def assemble_english():
"""Assemble each statement into """
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
stmts_json = body.get('statements')
stmts = stmts_from_json(stmts_json)
sentences = {}
for st in stmts:
enga = EnglishAssembler()
enga.add_statements([st])
model_str = enga.make_model()
sentences[st.uuid] = model_str
res = {'sentences': sentences}
return res | 4,711 |
def test_json_schema_component_to_modelldcatno_returns_empty() -> None:
"""Test that empty ParsedSchema is returned if invalid JSON Schema is passed."""
json_schema_dict: Dict[str, str] = {}
base_uri = "http://uri.com"
schema = Schema(base_uri, json_schema_dict)
path: List[str] = []
parsed_schema = json_schema_component_to_modelldcatno(schema, path)
assert ParsedSchema() == parsed_schema | 4,712 |
def schema_class(classname, schema, schemarepr=None, basename='SchemaBase'):
"""Generate code for a schema class
Parameters
----------
classname : string
The name of the class to generate
schema : dict
The dictionary defining the schema class
basename : string (default: "SchemaBase")
The name of the base class to use in the class definition
schemarepr : CodeSnippet or object, optional
An object whose repr will be used in the place of the explicit schema.
This can be useful, for example, when the generated code should reference
a predefined schema object. The user must ensure that the schema within
the evaluated code is identical to the schema used to generate the code.
"""
return SCHEMA_CLASS_TEMPLATE.format(
classname=classname,
basename=basename,
schema=schema if schemarepr is None else schemarepr,
docstring=docstring(classname, schema, indent=4),
init_code=init_code(classname, schema, indent=4)
) | 4,713 |
def orthology_events(ids='R-HSA-6799198,R-HSA-168256,R-HSA-168249', species='49633'):
"""
Reactome uses the set of manually curated human reactions to computationally infer reactions in
twenty evolutionarily divergent eukaryotic species for which high-quality whole-genome sequence
data are available, and hence a comprehensive and high-quality set of protein predictions exists.
Thus, this method retrieves the orthologies for any given set of events or entities in the specified species.
:param ids: The events identifiers for which the orthology is requested
:param species: The species id for which the orthology is requested
:return: Json dictionary object of the orthologies of a given set of events or entities
"""
headers = {
'accept': 'application/json',
'content-type': 'text/plain',
}
data = ids
url = 'https://reactome.org/ContentService/data/orthologies/ids/species/%s' % species
try:
response = requests.post(url=url, headers=headers, data=data)
    except ConnectionError as e:
        print(e)
        return None
if response.status_code == 200:
return response.json()
else:
print('Status code returned a value of %s' % response.status_code) | 4,714 |
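An example call against the public Reactome service; it requires a live network connection, and the response is assumed here to be a JSON mapping keyed by the requested identifiers.
orthologs = orthology_events(ids='R-HSA-6799198,R-HSA-168256', species='49633')
if orthologs is not None:
    for event_id, inferred in orthologs.items():
        print(event_id, '->', inferred)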
def judge(name):
"""
Return some sort of score for automatically ranking names based on all the
features we can extract so far.
I guess we'll just add the scores * weights up for now.
"""
score = 0
for scoreID, scorer, weight in weights:
subscore = scorer(name)
score += subscore * weight
name.scores[scoreID] = subscore
name.score = score
return score | 4,715 |
def parse_year(inp, option='raise'):
"""
Attempt to parse a year out of a string.
Parameters
----------
inp : str
String from which year is to be parsed
option : str
Return option:
- "bool" will return True if year is found, else False.
- Return year int / raise a RuntimeError otherwise
Returns
-------
out : int | bool
Year int parsed from inp,
or boolean T/F (if found and option is bool).
Examples
--------
>>> year_str = "NSRDB_2018.h5"
>>> parse_year(year_str)
2018
>>> year_str = "NSRDB_2018.h5"
>>> parse_year(year_str, option='bool')
True
>>> year_str = "NSRDB_TMY.h5"
>>> parse_year(year_str)
RuntimeError: Cannot parse year from NSRDB_TMY.h5
>>> year_str = "NSRDB_TMY.h5"
>>> parse_year(year_str, option='bool')
False
"""
# char leading year cannot be 0-9
# char trailing year can be end of str or not 0-9
regex = r".*[^0-9]([1-2][0-9]{3})($|[^0-9])"
match = re.match(regex, inp)
if match:
out = int(match.group(1))
if 'bool' in option:
out = True
else:
if 'bool' in option:
out = False
else:
raise RuntimeError('Cannot parse year from {}'.format(inp))
return out | 4,716 |
def retry_import(e, **kwargs):
"""
When an exception occurs during channel/content import, if
* there is an Internet connection error or timeout error,
or HTTPError where the error code is one of the RETRY_STATUS_CODE,
        return True to retry the file transfer
* the file does not exist on the server or disk, skip the file and return False.
This only applies to content import not channel import.
* otherwise, raise the exception.
return value:
* True - needs retry.
* False - file is skipped. Does not need retry.
"""
skip_404 = kwargs.pop("skip_404")
if (
isinstance(e, ConnectionError)
or isinstance(e, Timeout)
or isinstance(e, ChunkedEncodingError)
or (isinstance(e, HTTPError) and e.response.status_code in RETRY_STATUS_CODE)
or (isinstance(e, SSLERROR) and "decryption failed or bad record mac" in str(e))
):
return True
elif skip_404 and (
(isinstance(e, HTTPError) and e.response.status_code == 404)
or (isinstance(e, OSError) and e.errno == 2)
):
return False
else:
raise e | 4,717 |
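A sketch of a download loop built on the helper above; the `transfer` object, the attempt count, and the function name are illustrative and not part of the original module.
def download_with_retries(transfer, attempts=5, skip_404=True):
    for _ in range(attempts):
        try:
            transfer.run()
            return True                        # transfer finished
        except Exception as e:
            if not retry_import(e, skip_404=skip_404):
                return False                   # file skipped (404 / missing on disk)
            # retry_import re-raises unrecoverable errors, so reaching here means retry
    return False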
def register_random_state_impl(clz=None, *, scalar=None, batch=None):
"""
Register an implementation for the function generating random
state for the given Hilbert space class.
The rule can be implemented both as a scalar rule and as a batched
rule, but the best performance will be obtained by implementing
the batched version.
    The missing rule will be auto-implemented from the other.
scalar must have signature
(hilb, key, dtype) -> vector
batch must have signature
(hilb, key, size, dtype) -> matrix of states
The function will be jit compiled, so make sure to use jax.numpy.
Hilbert is passed as a static object.
Arguments:
clz: The class of the hilbert space
scalar: The function computing a single random state
batch: the function computing batches of random states
"""
if scalar is None and batch is None:
raise ValueError("You must at least provide a scalar or batch rule.")
scalar_rule = scalar
batch_rule = batch
if scalar is None:
if clz is None:
clz = list(batch.__annotations__.items())[0]
scalar_rule = partial(_random_state_scalar_default_impl, batch_rule=batch_rule)
if batch is None:
if clz is None:
clz = list(scalar.__annotations__.items())[0]
batch_rule = partial(_random_state_batch_default_impl, scalar_rule=scalar_rule)
random_state_scalar.register(clz, scalar_rule)
random_state_batch.register(clz, batch_rule) | 4,718 |
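A usage sketch for a hypothetical Hilbert space class: `MySpins` and its `n_sites` attribute are made up for illustration; only the registration pattern comes from the docstring above.
import jax

def _my_random_state_batch(hilb, key, size, dtype):
    # draw `size` spin configurations with values in {-1, +1}
    bits = jax.random.bernoulli(key, 0.5, (size, hilb.n_sites))
    return (2 * bits - 1).astype(dtype)

register_random_state_impl(MySpins, batch=_my_random_state_batch)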
def cvCloneMat(*args):
"""cvCloneMat(CvMat mat) -> CvMat"""
return _cv.cvCloneMat(*args) | 4,719 |
def run_episode(kwargs) -> [Trajectory]:
"""
Runs a single episode and collects the trajectories of each agent
"""
total_controller_time = 0
env_dict: Callable = kwargs.get("env_dict")
obs_builder = kwargs.get("obs_builder")
controller_creator: Callable = kwargs.get("controller_creator")
episode_id: int = kwargs.get("episode_id")
max_episode_length: int = kwargs.get("max_episode_length", 1000)
render: bool = kwargs.get("render", False)
# Create and Start Environment
_env = load_env(env_dict, obs_builder_object=obs_builder)
obs, info = _env.reset(regenerate_rail=False, regenerate_schedule=True, )
score = 0
_trajectories = [Trajectory() for _ in _env.get_agent_handles()]
# Create and Start Controller
controller: AbstractController = controller_creator()
start = time.time()
controller.start_of_round(obs=obs, env=_env)
total_controller_time += time.time() - start
if render:
env_renderer = RenderTool(_env)
env_renderer.reset()
for step in range(max_episode_length):
start = time.time()
action_dict, processed_obs = controller.act(observation=obs)
total_controller_time += time.time() - start
next_obs, all_rewards, done, info = _env.step(action_dict)
if render:
env_renderer.render_env(show=True, show_observations=True, show_predictions=False)
# Save actions and rewards for each agent
[_trajectories[agent_handle].add_row(
state=processed_obs[agent_handle],
action=action_dict[agent_handle],
reward=all_rewards[agent_handle],
done=done[agent_handle])
for agent_handle in _env.get_agent_handles()]
score += sum(all_rewards)
obs = next_obs.copy()
if done['__all__']:
break
if render:
env_renderer.close_window()
# print(f"\nController took a total time of: {total_controller_time} seconds", flush=True)
return _trajectories | 4,720 |
def init_output_logging():
"""
Initialize output logger
"""
global output_logger
if output_logger is None:
output_logger = OutputLogger()
sys.stdout = TeeOutputStream([sys.stdout, output_logger],
autoflush=True)
sys.stderr = TeeOutputStream([sys.stderr, output_logger],
autoflush=True) | 4,721 |
async def _reverse_proxy_handler(request: web.Request) -> web.Response:
"""
- Adds auth layer
- Adds access layer
- Forwards request to catalog service
SEE https://gist.github.com/barrachri/32f865c4705f27e75d3b8530180589fb
"""
user_id = request[RQT_USERID_KEY]
# path & queries
backend_url = to_backend_service(
request.rel_url,
request.app[f"{__name__}.catalog_origin"],
request.app[f"{__name__}.catalog_version_prefix"],
)
# FIXME: hack
if "/services" in backend_url.path:
backend_url = backend_url.update_query({"user_id": user_id})
logger.debug("Redirecting '%s' -> '%s'", request.url, backend_url)
# body
raw = None
if request.can_read_body:
raw: bytes = await request.read()
# injects product discovered by middleware in headers
fwd_headers = request.headers.copy()
product_name = request[RQ_PRODUCT_KEY]
fwd_headers.update({X_PRODUCT_NAME_HEADER: product_name})
# forward request
return await _request_catalog(
request.app, request.method, backend_url, fwd_headers, raw
) | 4,722 |
def get_data(request: Request):
"""
Get the data page.
Parameters
----------
request : Request
The request object.
Returns
-------
HTMLResponse
The data page.
"""
return templates.TemplateResponse("data.html", {"request": request}) | 4,723 |
def remove_last_measurements(dag_circuit, perform_remove=True):
"""Removes all measurements that occur as the last operation
on a given qubit for a DAG circuit. Measurements that are followed by
additional gates are untouched.
    This operation is done in-place on the input DAG circuit if perform_remove=True.
Parameters:
dag_circuit (qiskit.dagcircuit._dagcircuit.DAGCircuit): DAG circuit.
perform_remove (bool): Whether to perform removal, or just return node list.
Returns:
list: List of all measurements that were removed.
"""
removed_meas = []
try:
meas_nodes = dag_circuit.get_named_nodes('measure')
except DAGCircuitError:
return removed_meas
for idx in meas_nodes:
_, succ_map = dag_circuit._make_pred_succ_maps(idx)
if len(succ_map) == 2:
            # All successors of the measurement are outputs, one for qubit and one for cbit
# (As opposed to more gates being applied), and it is safe to remove the
# measurement node and add it back after the swap mapper is done.
removed_meas.append(dag_circuit.multi_graph.node[idx])
if perform_remove:
dag_circuit._remove_op_node(idx)
return removed_meas | 4,724 |
def get_html(url):
"""
Given a URL, will return the HTML using urllib3.
:param url: The url to extract the HTML from
    :return: The HTML string if extracted successfully; a message with the HTTP status on failure; "-1: <error>" if an exception is thrown.
"""
try:
# urllib3.disable_warnings()
        # Try the new where() function first; it sometimes fails,
        # so fall back to the old where() function.
# Read more: https://github.com/certifi/python-certifi#usage
try:
http = urllib3.PoolManager(
cert_reqs='CERT_REQUIRED',
ca_certs=certifi.where()
)
except:
http = urllib3.PoolManager(
cert_reqs='CERT_REQUIRED',
ca_certs=certifi.old_where()
)
r = http.request('GET', url, timeout=5.0)
if str(r.status).startswith("2"):
html = r.data.decode("utf-8")
return html
else:
return "Failed to get html, status: " + str(r.status)
except Exception as e:
sys.stdout.write(str(e))
return "-1: " + str(e) | 4,725 |
def J(*args, **kwargs):
"""Wrapper around jsonify that sets the Content-Type of the response to
application/vnd.api+json.
"""
response = jsonify(*args, **kwargs)
response.mimetype = "application/vnd.api+json"
return response | 4,726 |
def readable_dir(prospective_dir):
""" check if dir is exist or acessable"""
if not os.path.isdir(prospective_dir):
sys.exit("{} is not a valid path".format(prospective_dir))
if os.access(prospective_dir, os.R_OK):
return prospective_dir
else:
sys.exit("{} is not a readable dir".format(prospective_dir)) | 4,727 |
def is_gzipped(filename):
""" Returns True if the target filename looks like a GZIP'd file.
"""
with open(filename, 'rb') as fh:
return fh.read(2) == b'\x1f\x8b' | 4,728 |
def test_tan():
"""Test of tan method."""
    # Test for tan with Rnode objects
x = Rnode(1.0)
z = Elem.tan(x)
z.grad_value = 1.0
try:
assert z.value == np.tan(x.value)
assert x.grad() == 1 / (np.cos(x.value) ** 2)
except AssertionError as e:
print(e)
# Test for tan with two Dual objects
val1 = Dual(3, [4, 1])
val2 = Dual(2, [3, 1])
val = val1 - val2
z = Elem.tan(val)
try:
assert z.val == np.tan(val.val)
assert z.der[0] == 1/np.cos(val.val)**2 * val.der[0]
assert z.der[1] == 1/np.cos(val.val)**2 * val.der[1]
except AssertionError as e:
print(e)
raise AssertionError
# Test for tan with int,
x = 3
fx = Elem.tan(x)
try:
assert fx == np.tan(x)
except AssertionError as e:
print(e)
raise AssertionError | 4,729 |
def load_schedule_data(
neo4j_session: neo4j.Session, data: List[Dict], update_tag: int,
) -> None:
"""
Transform and load schedule information
"""
ingestion_cypher_query = """
UNWIND {Schedules} AS schedule
MERGE (u:PagerDutySchedule{id: schedule.id})
ON CREATE SET u.html_url = schedule.html_url,
u.firstseen = timestamp()
SET u.type = schedule.type,
u.summary = schedule.summary,
u.name = schedule.name,
u.time_zone = schedule.time_zone,
u.description = schedule.description,
u.lastupdated = {update_tag}
"""
logger.info(f"Loading {len(data)} pagerduty schedules.")
users: List[Dict[str, Any]] = []
layers: List[Dict[str, Any]] = []
for schedule in data:
if schedule.get("users"):
for user in schedule["users"]:
users.append({"schedule": schedule["id"], "user": user["id"]})
if schedule.get("schedule_layers"):
for layer in schedule["schedule_layers"]:
layer["_schedule_id"] = schedule["id"]
layers.append(layer)
neo4j_session.run(
ingestion_cypher_query,
Schedules=data,
update_tag=update_tag,
)
_attach_users(neo4j_session, users, update_tag)
_attach_layers(neo4j_session, layers, update_tag) | 4,730 |
def load_data(loc='./data/'):
"""
Load the SICK semantic-relatedness dataset
"""
trainA, trainB, devA, devB, testA, testB = [],[],[],[],[],[]
trainS, devS, testS = [],[],[]
with open(os.path.join(loc, 'sick_train.txt'), 'r') as f:
for line in f:
text = line.strip().split('\t')
trainA.append(text[0])
trainB.append(text[1])
trainS.append(text[2])
with open(os.path.join(loc, 'sick_dev.txt'), 'r') as f:
for line in f:
text = line.strip().split('\t')
devA.append(text[0])
devB.append(text[1])
devS.append(text[2])
with open(os.path.join(loc, 'sick_test.txt'), 'r') as f:
for line in f:
text = line.strip().split('\t')
testA.append(text[0])
testB.append(text[1])
testS.append(text[2])
trainS = [float(s) for s in trainS]
devS = [float(s) for s in devS]
testS = [float(s) for s in testS]
return [trainA, trainB], [devA, devB], [testA, testB], [trainS, devS, testS] | 4,731 |
def tag_helper(tag, items, locked=True, remove=False):
""" Simple tag helper for editing a object. """
if not isinstance(items, list):
items = [items]
data = {}
if not remove:
for i, item in enumerate(items):
tagname = '%s[%s].tag.tag' % (tag, i)
data[tagname] = item
if remove:
tagname = '%s[].tag.tag-' % tag
data[tagname] = ','.join(items)
data['%s.locked' % tag] = 1 if locked else 0
return data | 4,732 |
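The payload produced by the helper above, traced by hand for a small example.
payload = tag_helper('genre', ['Action', 'Comedy'])
# payload == {'genre[0].tag.tag': 'Action',
#             'genre[1].tag.tag': 'Comedy',
#             'genre.locked': 1}

removal = tag_helper('genre', ['Action'], remove=True)
# removal == {'genre[].tag.tag-': 'Action', 'genre.locked': 1}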
def send_email_updates(poller_name):
""" Takes a school name and updates its course data and then sends all open course notifications. """
requests = UpdateDB(poller_name)
for request in requests:
emails = []
for entry in request[0]:
emails.append(entry.email)
db.session.delete(entry)
send_email(emails, request[1], request[1]) | 4,733 |
def _check_stata_output(output):
""".. Check Stata output"""
regex = "end of do-file[\s]*r\([0-9]*\);"
if re.search(regex, output):
error_message = 'Stata program executed with errors.'
error_message = format_message(error_message)
raise_from(ProgramError(error_message,
'See makelog for more detail.'), None) | 4,734 |
def test_ap_wps_upnp_http_proto_chunked(dev, apdev):
"""WPS AP and UPnP/HTTP protocol testing for chunked encoding"""
ap_uuid = "27ea801a-9e5c-4e73-bd82-f89cbcd10d7e"
add_ssdp_ap(apdev[0], ap_uuid)
location = ssdp_get_location(ap_uuid)
url = urlparse(location)
conn = HTTPConnection(url.netloc)
#conn.set_debuglevel(1)
headers = { "Transfer-Encoding": 'chunked' }
conn.request("POST", "hello",
"a\r\nabcdefghij\r\n" + "2\r\nkl\r\n" + "0\r\n\r\n",
headers)
resp = conn.getresponse()
if resp.status != 404:
raise Exception("Unexpected HTTP response: %d" % resp.status)
conn.close()
conn.putrequest("POST", "hello")
conn.putheader('Transfer-Encoding', 'chunked')
conn.endheaders()
conn.send(b"a\r\nabcdefghij\r\n")
time.sleep(0.1)
conn.send(b"2\r\nkl\r\n")
conn.send(b"0\r\n\r\n")
resp = conn.getresponse()
if resp.status != 404:
raise Exception("Unexpected HTTP response: %d" % resp.status)
conn.close()
conn.putrequest("POST", "hello")
conn.putheader('Transfer-Encoding', 'chunked')
conn.endheaders()
completed = False
try:
for i in range(20000):
conn.send(b"1\r\nZ\r\n")
conn.send(b"0\r\n\r\n")
resp = conn.getresponse()
completed = True
except Exception as e:
pass
conn.close()
if completed:
raise Exception("Too long chunked request did not result in connection reset")
headers = { "Transfer-Encoding": 'chunked' }
conn.request("POST", "hello", "80000000\r\na", headers)
try:
resp = conn.getresponse()
except Exception as e:
pass
conn.close()
conn.request("POST", "hello", "10000000\r\na", headers)
try:
resp = conn.getresponse()
except Exception as e:
pass
conn.close() | 4,735 |
def test_buchtitelgenerator_returns_mocked_books(mock_buchtitelgenerator: Mock) -> None:
"""It returns a list."""
books = randombuch.buchtitelgenerator()
assert "Foo" in books | 4,736 |
def get_keys_from_file(csv):
"""Extract the credentials from a csv file."""
lines = tuple(open(csv, 'r'))
creds = lines[1]
access = creds.split(',')[2]
secret = creds.split(',')[3]
return access, secret | 4,737 |
def _handle_api_error_with_json(http_exc, jsondata, response):
"""Handle YOURLS API errors.
requests' raise_for_status doesn't show the user the YOURLS json response,
so we parse that here and raise nicer exceptions.
"""
if 'code' in jsondata and 'message' in jsondata:
code = jsondata['code']
message = jsondata['message']
if code == 'error:noloop':
raise YOURLSNoLoopError(message, response=response)
elif code == 'error:nourl':
raise YOURLSNoURLError(message, response=response)
elif 'message' in jsondata:
message = jsondata['message']
raise YOURLSHTTPError(message, response=response)
http_error_message = http_exc.args[0]
raise YOURLSHTTPError(http_error_message, response=response) | 4,738 |
async def roll(ctx, *, param:re_convert=re_convert.defaults):
"""Rolls a dice in NdN format."""
rolls, limit = map(int, param["dice"].split('d'))
result = ', '.join(str(random.randint(1, limit)) for r in range(rolls))
await ctx.send(result) | 4,739 |
def fix_model(project, models, invert=False):
"""Fix model name where file attribute is different from values accepted by facets
>>> fix_model('CMIP5', ['CESM1(BGC)', 'CESM1-BGC'])
['CESM1(BGC)', 'CESM1(BGC)']
>>> fix_model('CMIP5', ['CESM1(BGC)', 'CESM1-BGC'], invert=True)
['CESM1-BGC', 'CESM1-BGC']
Args:
project (str): data project
models (list) models to convert
invert (bool): Invert the conversion (so go from ``CESM1(BGC)`` to ``CESM1-BGC``)
"""
project = project.upper().split('-')[0]
if project in ['CMIP5', 'CORDEX']:
mfile = pkg_resources.resource_filename(__name__, 'data/'+project+'_model_fix.json')
with open(mfile, 'r') as f:
mdict = json.loads(f.read())
if invert:
mfix = {v: k for k, v in mdict.items()}
else:
mfix = mdict
return [mfix[m] if m in mfix.keys() else m for m in models] | 4,740 |
def rectified_linear_unit(x):
""" Returns the ReLU of x, or the maximum between 0 and x."""
    return np.maximum(0, x)
def register_npzd_data(vs, tracer):
""" Add tracer to the NPZD data set and create node in interaction graph
Tracers added are available in the npzd dynamics and is automatically
included in transport equations
Parameters
----------
tracer
An instance of :obj:`veros.core.npzd_tracer.NPZD_tracer`
to be included in biogeochemistry calculations
"""
if tracer.name in vs.npzd_tracers.keys():
raise ValueError('{name} has already been added to the NPZD data set'.format(name=tracer.name))
vs.npzd_tracers[tracer.name] = tracer
if tracer.transport:
vs.npzd_transported_tracers.append(tracer.name) | 4,742 |
def create_save_directory(path, directory_name):
"""
This function makes the directory to save the data.
Parameters
----------
path : string
        Where the directory_name will be created.
    directory_name : string
        The directory name where the plots will be saved.
    Returns
    ----------
    success : bool
True if the directories were created successfully.
"""
try:
if not os.path.isdir(f'{path}'):
os.mkdir(f'{path}')
os.mkdir(f'{path}\\{directory_name}')
return True
except OSError:
print('Error creating directories')
return False | 4,743 |
def bwt_compress(filename):
"""Compress with bwt."""
with open(filename, "rb") as in_stream:
with open(filename + ".bwt", "wb") as out_stream:
bwt_encoded = bwt_encode(in_stream)
rle_encoded = rle_encode(bwt_encoded)
out_stream.write(rle_encoded)
os.remove(filename) | 4,744 |
def encode_to_filename(folder, animal, session, ftypes="processed_all"):
"""
:param folder: str
folder for data storage
:param animal: str
animal name: e.g. A2A-15B-B_RT
:param session: str
session name: e.g. p151_session1_FP_RH
    :param ftypes: list or str:
        list (or a single str) of file types to return
'exper': .mat files
'bin_mat': binary file
'green': green fluorescence
'red': red FP
'behavior': .mat behavior file
'FP': processed dff hdf5 file
if ftypes=="all"
:return:
returns all 5 files in a dictionary; otherwise return all file types
in a dictionary, None if not found
"""
# TODO: enable aliasing
paths = [os.path.join(folder, animal, session), os.path.join(folder, animal+'_'+session),
os.path.join(folder, animal), folder]
if ftypes == "raw all":
ftypes = ["exper", "bin_mat", "green", "red"]
elif ftypes == "processed_all":
ftypes = ["processed", "green", "red", "FP"]
elif isinstance(ftypes, str):
ftypes = [ftypes]
results = {ft: None for ft in ftypes}
registers = 0
for p in paths:
if os.path.exists(p):
for f in os.listdir(p):
opt = decode_from_filename(f)
if opt is not None:
ift = opt['ftype']
check_mark = opt['animal'] == animal and opt['session'] == session
#print(opt['session'], animal, session)
check_mark_mdl = (opt['animal'] == animal) and (opt['session'] in session)
cm_mdl = (ift == 'modeling' and check_mark_mdl)
# TODO: temporary hacky method for modeling
#print(opt['session'], animal, session, check_mark_mdl, ift, cm_mdl)
if ift in ftypes and results[ift] is None and (check_mark or cm_mdl):
results[ift] = os.path.join(p, f)
registers += 1
if registers == len(ftypes):
return results if len(results) > 1 else results[ift]
return results if len(results) > 1 else list(results.values())[0] | 4,745 |
def _get_choices(choices: Union[str, List]) -> List[Tuple[str, str]]:
"""Returns list of choices, used for the ChoiceFields"""
result = [('', '')]
if isinstance(choices, str):
result.append((choices, choices))
else:
for choice in choices:
result.append((choice, choice))
return result | 4,746 |
def invalid_file():
"""Create an invalid filename string."""
return "/tmp/INVALID.FILE" | 4,747 |
def ingest_aw1c_manifest():
"""
Entrypoint for CVL AW1C Manifest Ingestion workflow
"""
with GenomicJobController(GenomicJob.AW1C_INGEST,
bucket_name=None,
bucket_name_list=config.GENOMIC_CENTER_BUCKET_NAME,
sub_folder_name=config.GENOMIC_CVL_AW1C_MANIFEST_SUBFOLDER) as controller:
controller.run_aw1c_workflow() | 4,748 |
def get_model(app_label, model_name):
"""
    Fetches a Django model using the app registry.
    All other methods to access models might raise an exception about
    the registry not being ready yet.
    This doesn't require that an app with the given app label exists,
    which makes it safe to call when the registry is being populated.
Raises LookupError if model isn't found
"""
try:
return apps.get_model(app_label, model_name)
except AppRegistryNotReady:
if apps.apps_ready and not apps.models_ready:
            # if this function is called while `apps.populate()` is
            # loading models, ensure that the module that defines
            # the target model has been imported and try looking the
            # model up in the app registry. This effectively emulates
# `from path.to.app.models import Model` where we use
# `Model = get_model('app', 'Model')` instead
app_config = apps.get_app_config(app_label)
# `app_config.import_models()` cannot be used here because
# it would interfere with `app.populate()`
import_module("%s.%s" % (app_config.name, MODELS_MODULE_NAME))
# In order to account for case-insensitivity of model_name,
# look up the model through a private API of the app registry.
return apps.get_registered_model(app_label, model_name)
else:
# This must be a different case (e.g. the model really doesn't
# exist). We just re-raise the exception.
raise | 4,749 |
def main():
"""main"""
pass | 4,750 |
def add_title(file: '_io.TextIOWrapper', ext: str, title: str, link: str) -> None:
"""Add title and link of URL to the document. Format depends on file ext.
@param File file: File to add to.
@param str ext: File extension.
@param str title: URL title.
@param str link: URL link.
@return: Nothing."""
title_text = title + " "
if is_html(ext):
title_text = "<h1>" + title_text + "<a href=\"" + link + "\">" + link + "</a></h1>"
else:
title_text += link
title_text += "\n"
file.write(title_text) | 4,751 |
def hexdump(adr_data_tuple, output=sys.stdout):
"""\
Print a hex dump.
    :param adr_data_tuple: tuple of (address, memory contents); contents are bytes/str
:param output: file like object to write to
"""
adr, memstr = adr_data_tuple
# conversion to byte array only needed for python 2.xx as bytes would return
# characters instead of ints
for address, row in sixteen(adr, bytearray(memstr)):
values = ' '.join('{:02x}'.format(x) for x in row)
ascii = ''.join(chr(x) if (32 <= x < 128) else '.' for x in row)
# pad width
values += ' ' * (47 - len(values))
        ascii += ' ' * (16 - len(ascii))
# output line, insert gap at 8
output.write('{:08x}: {} {} {} {}\n'.format(
address,
values[:24], values[24:],
ascii[:8], ascii[8:])) | 4,752 |
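An example call, assuming the module's `sixteen` helper (not shown here) yields 16-byte rows; the exact column spacing depends on that helper.
import sys

hexdump((0x100, b'Hello, hexdump demo!'), output=sys.stdout)
# prints rows roughly of the form
# 00000100: 48 65 6c 6c 6f 2c 20 68  65 78 64 75 6d 70 20 64 Hello, h exdump d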
def test_main() -> None:
"""Should load dry (no config) and with empty config"""
filename = f"tests/fixtures/temp_{NOW}"
assert not os.path.isfile(filename)
with patch.object(prfile, "CONFIG_FILE", filename):
with patch("builtins.input", lambda user_in: "mock"):
with patch.object(prfile, "run_user_prompt"):
with patch.object(prfile, "RepoActions"):
with pytest.raises(FileNotFoundError):
prfile.main(prfile.cli_parser(MOCK_FILES))
result = prfile.main(prfile.cli_parser(VALID_FILES))
assert os.path.isfile(filename)
os.remove(filename)
assert result == 0 | 4,753 |
def affaires_view(request):
"""
Return all affaires
"""
# Check connected
if not check_connected(request):
raise exc.HTTPForbidden()
query = request.dbsession.query(VAffaire).order_by(VAffaire.id.desc()).all()
return Utils.serialize_many(query) | 4,754 |
def metric_try_to_float(s: str) -> Union[float, str]:
"""
Try to convert input string to float value.
Return float value on success or input value on failure.
"""
v = s
try:
if "%" in v:
v = v[:-1]
return float(v)
except ValueError:
return str(s) | 4,755 |
def calculate_boot_time(pngs_dir, fps, refer_end_pic):
"""
    Compute the app startup time from a sequence of screenshot files.
    :param pngs_dir: directory containing the screenshots
    :param fps: frame rate of the capture
    :param refer_end_pic: reference picture marking the end position
    :return: startup time in milliseconds
"""
    # Find the start (tap response) and end (home content rendered) points of the startup
pngs = os.listdir(pngs_dir)
pngs.sort()
start_t, end_t, boot_time = 0, 0, 0
    # Find the start point by comparing similarity against the first screenshot
refer_start_pic = os.path.join(pngs_dir, pngs[0])
for png in pngs[1:]:
dest_png = os.path.join(pngs_dir, png)
factor = ssim.compute_ssim(refer_start_pic, dest_png)
logging.info("%s 相似度:%f" % (png, factor))
if factor < 0.9:
start_t = int(png.split('.png')[0])
break
if start_t > 0:
        # Keep looking for the end point: the last position where the reference matches twice in a row
third_f, second_f, first_f = 0, 0, 0
for png in pngs[start_t:]:
dest_png = os.path.join(pngs_dir, png)
current_f = ssim.compute_ssim(refer_end_pic, dest_png)
logging.info("%s 相似度:%f" % (png, current_f))
third_f = second_f
second_f = first_f
first_f = current_f
            # TODO: tune this threshold range for the actual business scenario
if third_f > 0.96 and second_f > 0.96 and first_f < 0.96:
end_t = int(png.split('.png')[0])
break
    # Validity checks and time calculation
    if start_t == 0 or end_t == 0:
        logging.warning("Start or end screenshot not found")
    elif end_t == len(pngs):
        logging.warning("Invalid end position")
    else:
        boot_time = int((end_t - start_t) * 1000 / fps)
        logging.info("Start frame: %d, end frame: %d, startup took %d ms", start_t, end_t, boot_time)
return boot_time | 4,756 |
def _table_difference(left: TableExpr, right: TableExpr):
"""
Form the table set difference of two table expressions having identical
schemas. A set difference returns only the rows present in the left table
that are not present in the right table
Parameters
----------
left : TableExpr
right : TableExpr
Returns
-------
difference : TableExpr
"""
return ops.Difference(left, right).to_expr() | 4,757 |
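An example with two Ibis table expressions that share a schema; the table names are illustrative.
import ibis

t1 = ibis.table([('id', 'int64'), ('name', 'string')], name='all_users')
t2 = ibis.table([('id', 'int64'), ('name', 'string')], name='banned_users')

active_only = _table_difference(t1, t2)   # rows of t1 that are not in t2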
def test_timerange_ispropersuperset():
"""timerange_ispropersuperset(TimeRange, TimeRange) -> bool"""
timerange_a = TimeRange(Time(1000), Time(2000))
timerange_b = TimeRange(Time(2500), Time(3000))
timerange_c = TimeRange(Time(2000), Time(2500))
timerange_d = TimeRange(Time(1500), Time(2500))
timerange_e = TimeRange(Time(1000), Time(2000))
timerange_f = TimeRange(Time(500), Time(1500))
timerange_g = TimeRange(Time(500), Time(1000))
timerange_h = TimeRange(Time(0), Time(500))
timerange_i = TimeRange(Time(1500), Time(2000))
timerange_j = TimeRange(Time(1200), Time(1800))
timerange_k = TimeRange(Time(1000), Time(1500))
timerange_l = TimeRange(Time(1000), Time(2500))
timerange_m = TimeRange(Time(500), Time(2500))
timerange_n = TimeRange(Time(500), Time(2000))
assert timerange_ispropersuperset(timerange_a, timerange_b) is False
assert timerange_ispropersuperset(timerange_a, timerange_c) is False
assert timerange_ispropersuperset(timerange_a, timerange_d) is False
assert timerange_ispropersuperset(timerange_a, timerange_e) is False
assert timerange_ispropersuperset(timerange_a, timerange_f) is False
assert timerange_ispropersuperset(timerange_a, timerange_g) is False
assert timerange_ispropersuperset(timerange_a, timerange_h) is False
assert timerange_ispropersuperset(timerange_a, timerange_i) is False
assert timerange_ispropersuperset(timerange_a, timerange_j) is False
assert timerange_ispropersuperset(timerange_a, timerange_k) is False
assert timerange_ispropersuperset(timerange_a, timerange_l) is True
assert timerange_ispropersuperset(timerange_a, timerange_m) is True
assert timerange_ispropersuperset(timerange_a, timerange_n) is True | 4,758 |
def select(receivers, senders, exceptions, timeout):
"""
receivers - list of one element, the simulated receiver socket
senders - list of one element, the simulated sender socket
exceptions - empty list, the simulated sockets with exceptions
ignore timeout - there is no real concurrency here
"""
# print 'select: recv buffers "%s", send buffers "%s", bufsize %d' % \
# (''.join(receivers[0].buffers), ''.join(senders[0].buffers), bufsize) #DEBUG
inputready = receivers if len(receivers[0].buffers) > 0 else []
outputready = senders if (socket_simulator.bufsize
- len(senders[0].buffers)) > 0 else []
exceptions = []
return inputready, outputready, exceptions | 4,759 |
def _create_local_database(db_file_path):
"""Create a new local database"""
conn = sql.connect(db_file_path)
cur = conn.cursor()
table = str('CREATE TABLE config ('
'ID INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,'
'Name TEXT UNIQUE NOT NULL,'
'Value TEXT);')
cur.execute(table)
if conn:
conn.close() | 4,760 |
def parse_args():
"""
    It parses the command-line arguments read from sys.argv.
Returns
-------
parsed_args : argparse.Namespace
It contains the command-line arguments that are supplied by the user
"""
parser = ap.ArgumentParser(description="Encoding algorithm.")
parser.add_argument("docking_program", type=str,
help="Path to folder containing the PDB files.")
parser.add_argument("output", type=str,
help="Path to the output file.")
parser.add_argument("-c","--n_proc", type=int,
help='Number of processor.', default = 1)
parser.add_argument("--chain", type=str,
help='Chain ID from the ligand protein.', default = 'B')
parser.add_argument("--score", type=str,
help='Path to normalized scoring file to add in the ' +
'encoding.')
parsed_args = parser.parse_args()
return parsed_args | 4,761 |
def parse_row(row, entity_dict, span_capture_list, previous_entity):
""" updates the entity dict and span capture list based on row contents """
bio_tag, entity = parse_tag(row.tag)
if bio_tag == 'B':
# update with previous entity, if applicable
entity_dict, span_capture_list, previous_entity = update_entity_dict(entity_dict, span_capture_list, previous_entity)
# start collecting new entity
span_capture_list = [row.word]
previous_entity = entity
elif bio_tag == 'I':
# continue collecting entity
span_capture_list.append(row.word)
else:
# update with previous entity, if applicable
entity_dict, span_capture_list, previous_entity = update_entity_dict(entity_dict, span_capture_list, previous_entity)
previous_entity = None
return entity_dict, span_capture_list, previous_entity | 4,762 |
def _is_fn_init(
tokens: list[Token] | Token,
errors_handler: ErrorsHandler,
path: str,
namehandler: NameHandler,
i: int = 0
):
""" "fn" <fn-name> "("<arg>*")" (":" <returned-type>)? <code-body>"""
tokens = extract_tokens_with_code_body(tokens, i)
if tokens is None or not is_kw(tokens[0], 'fn'):
return False
has_type_annotation = len(tokens) >= 4 and is_op(tokens[3], '->')
if len(tokens) < 4 or not is_base_name(tokens[1]) or tokens[2].type != TokenTypes.PARENTHESIS \
or not _is_code_body(tokens[-1]) or (
has_type_annotation and not _is_type_expression(tokens[:-1], errors_handler, path, namehandler, 4)
) or (not has_type_annotation and len(tokens) != 4):
errors_handler.final_push_segment(
path,
'SyntaxError: invalid syntax',
tokens[-1],
fill=True
)
return False
args_tokens = tokens[2].value
if args_tokens:
if args_tokens[0].type == TokenTypes.TUPLE:
has_default_argument = False
for arg_tokens in args_tokens[0].value:
if not arg_tokens:
break
if not _is_setvalue_expression(arg_tokens, errors_handler, path, namehandler, init_type='let'):
errors_handler.final_push_segment(
path,
'SyntaxError: invalid syntax',
arg_tokens[0],
fill=True
)
return False
if DummyToken(TokenTypes.OP, '=') in arg_tokens:
has_default_argument = True
elif has_default_argument:
errors_handler.final_push_segment(
path,
'SyntaxError: non-default argument follows default argument',
arg_tokens[0],
fill=True
)
return False
elif not _is_setvalue_expression(args_tokens, errors_handler, path, namehandler, init_type='let'):
return False
return True | 4,763 |
def _toggle_options(event, params):
"""Toggle options (projectors) dialog"""
import matplotlib.pyplot as plt
if len(params['projs']) > 0:
if params['fig_opts'] is None:
_draw_proj_checkbox(event, params, draw_current_state=False)
else:
# turn off options dialog
plt.close(params['fig_opts'])
del params['proj_checks']
params['fig_opts'] = None | 4,764 |
async def http_connect(address: str, port: int) -> HttpConnection:
"""Open connection to a remote host."""
loop = asyncio.get_event_loop()
_, connection = await loop.create_connection(HttpConnection, address, port)
return cast(HttpConnection, connection) | 4,765 |
def internal_copied_filegroup(name, srcs, strip_prefix, dest, **kwargs):
"""Macro to copy files to a different directory and then create a filegroup.
This is used by the //:protobuf_python py_proto_library target to work around
an issue caused by Python source files that are part of the same Python
package being in separate directories.
Args:
srcs: The source files to copy and add to the filegroup.
strip_prefix: Path to the root of the files to copy.
dest: The directory to copy the source files into.
    **kwargs: extra arguments that will be passed to the filegroup.
"""
outs = [_RelativeOutputPath(s, strip_prefix, dest) for s in srcs]
native.genrule(
name = name + "_genrule",
srcs = srcs,
outs = outs,
cmd = " && ".join(
["cp $(location %s) $(location %s)" %
(s, _RelativeOutputPath(s, strip_prefix, dest)) for s in srcs]),
)
native.filegroup(
name = name,
srcs = outs,
**kwargs) | 4,766 |
def debug(debug_on=True):
"""Turn debugging of DICOM file reading and writing on or off.
When debugging is on, file location and details about the
elements read at that location are logged to the 'pydicom'
logger using python's logging module.
:param debug_on: True (default) to turn on debugging,
False to turn off.
"""
global logger, debugging
if debug_on:
logger.setLevel(logging.DEBUG)
debugging = True
else:
logger.setLevel(logging.WARNING)
debugging = False | 4,767 |
async def run():
"""Run the monitoring."""
set_log_levels(
logger="info",
logger_pyinsteon="info",
logger_messages="info",
logger_topics=True,
)
# await async_connect(host=HOST, username=USERNAME, password=PASSWORD)
await async_connect(device=DEVICE)
devices.subscribe(device_added)
await devices.async_load(workdir=PATH, id_devices=0)
await devices.async_save(workdir=PATH)
address = "453194"
device = devices[address]
if device:
await device.async_read_op_flags()
await device.async_read_ext_properties()
_LOGGER.info(
"LED_BLINK_ON_TX_ON: %s",
device.operating_flags[MOMENTARY_ON_OFF_TRIGGER].value,
)
device.operating_flags[MOMENTARY_ON_OFF_TRIGGER].new_value = True
await device.async_write_op_flags()
await device.async_write_ext_properties()
await device.async_read_op_flags()
await device.async_read_ext_properties()
_LOGGER.info(
"LED_BLINK_ON_TX_ON: %s",
device.operating_flags[MOMENTARY_ON_OFF_TRIGGER].value,
)
else:
_LOGGER.info("No device found for address: %s", address)
await devices.async_save(workdir=PATH)
await async_close() | 4,768 |
def make_parallel_transformer_config() -> t5_architecture.EncoderDecoder:
"""Returns an EncoderDecoder with parallel=True."""
dtype = jnp.bfloat16
num_attn_heads = 8
make_dropout = lambda: nn.Dropout(rate=0.1, broadcast_dims=(-2,))
make_layer_norm = layer_norm.T5LayerNorm
def _make_encoder_layer(shared_relative_position_bias):
assert shared_relative_position_bias is None
return t5_architecture.EncoderLayer(
attention=make_attention1(num_attn_heads, dtype),
mlp=make_mlp1(dtype),
dropout_factory=make_dropout,
layer_norm_factory=make_layer_norm,
relative_position_bias_factory=(
lambda: _make_relative_position_bias(num_attn_heads, dtype)),
parallel=True,
)
def _make_decoder_layer(shared_relative_position_bias):
assert shared_relative_position_bias is None
return t5_architecture.DecoderLayer(
self_attention=make_attention1(num_attn_heads, dtype),
encoder_decoder_attention=make_attention1(num_attn_heads, dtype),
mlp=make_mlp1(dtype),
dropout_factory=make_dropout,
layer_norm_factory=make_layer_norm,
relative_position_bias_factory=(
lambda: _make_relative_position_bias(num_attn_heads, dtype)),
parallel=True,
)
def _make_encoder(shared_token_embedder):
assert shared_token_embedder is None
return t5_architecture.Encoder(
num_layers=3,
token_embedder_factory=lambda: make_token_emb1(2_000, dtype),
layer_factory=_make_encoder_layer,
input_dropout_factory=make_dropout,
output_dropout_factory=make_dropout,
layer_norm_factory=make_layer_norm,
dtype=dtype,
)
def _make_decoder(shared_token_embedder):
assert shared_token_embedder is None
return t5_architecture.Decoder(
num_layers=2,
token_embedder_factory=lambda: make_token_emb1(2_000, dtype),
layer_factory=_make_decoder_layer,
dropout_factory=make_dropout,
layer_norm_factory=make_layer_norm,
output_logits_factory=None,
dtype=dtype,
)
return t5_architecture.EncoderDecoder(
shared_token_embedder_factory=lambda: None,
encoder_factory=_make_encoder,
decoder_factory=_make_decoder,
) | 4,769 |
def commands():
"""
Serve models on RedisAI.
To serve a model associated with a run on a tracking server, set the MLFLOW_TRACKING_URI
environment variable to the URL of the desired server.
"""
pass | 4,770 |
def get_device_mapping(embedding_sizes, num_gpus, data_parallel_bottom_mlp,
experimental_columnwise_split, num_numerical_features):
"""Get device mappings for hybrid parallelism
Bottom MLP running on device 0. Embeddings will be distributed across among all the devices.
Optimal solution for partitioning set of N embedding tables into K devices to minimize maximal subset sum
is an NP-hard problem. Additionally, embedding tables distribution should be nearly uniform due to the performance
constraints. Therefore, suboptimal greedy approach with max bucket size is used.
Args:
embedding_sizes (Sequence[int]): embedding tables sizes
num_gpus (int): Default 8.
Returns:
device_mapping (dict):
"""
if num_numerical_features == 0:
bottom_mlp_ranks = []
elif data_parallel_bottom_mlp:
bottom_mlp_ranks = list(range(num_gpus))
else:
bottom_mlp_ranks = [0]
if experimental_columnwise_split:
gpu_buckets = num_gpus * [list(range(len(embedding_sizes)))]
vectors_per_gpu = [len(bucket) for bucket in gpu_buckets]
if num_numerical_features > 0:
vectors_per_gpu[0] += 1 # count bottom mlp
return MultiGpuMetadata(bottom_mlp_ranks=bottom_mlp_ranks,
rank_to_categorical_ids=gpu_buckets,
rank_to_feature_count=vectors_per_gpu)
if num_gpus > 4 and not data_parallel_bottom_mlp and num_numerical_features > 0:
# for higher no. of GPUs, make sure the one with bottom mlp has no embeddings
gpu_buckets = distribute_to_buckets(embedding_sizes, num_gpus - 1) # leave one device out for the bottom MLP
gpu_buckets.insert(0, [])
else:
gpu_buckets = distribute_to_buckets(embedding_sizes, num_gpus)
vectors_per_gpu = [len(bucket) for bucket in gpu_buckets]
if not data_parallel_bottom_mlp:
for rank in bottom_mlp_ranks:
vectors_per_gpu[rank] += 1 # count bottom mlp
return MultiGpuMetadata(bottom_mlp_ranks=bottom_mlp_ranks,
rank_to_categorical_ids=gpu_buckets,
rank_to_feature_count=vectors_per_gpu) | 4,771 |
def _generate_relative_positions_embeddings(length, depth,
max_relative_position, name):
"""Generates tensor of size [length, length, depth]."""
with tf.variable_scope(name):
relative_positions_matrix = _generate_relative_positions_matrix(
length, max_relative_position)
vocab_size = max_relative_position * 2 + 1
# Generates embedding for each relative position of dimension depth.
embeddings_table = tf.get_variable("embeddings", [vocab_size, depth])
embeddings = tf.gather(embeddings_table, relative_positions_matrix)
return embeddings | 4,772 |
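The companion _generate_relative_positions_matrix is not shown above; the NumPy sketch below only illustrates the clipping-and-shifting idea it is assumed to implement (clipped distances j - i mapped into [0, 2 * max_relative_position]).
import numpy as np

def relative_positions_matrix(length, max_relative_position):
    positions = np.arange(length)
    distance = positions[None, :] - positions[:, None]             # j - i
    clipped = np.clip(distance, -max_relative_position, max_relative_position)
    return clipped + max_relative_position                         # valid embedding indices

print(relative_positions_matrix(4, 2))
# [[2 3 4 4]
#  [1 2 3 4]
#  [0 1 2 3]
#  [0 0 1 2]]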
def annotation():
"""Annotation file utilities.""" | 4,773 |
def continTapDetector(
fs: int, x=[], y=[], z=[], side='right',
):
"""
Detect the moments of finger-raising and -lowering
during a fingertapping task.
Function detects the axis with most variation and then
first detects several large/small pos/neg peaks, then
the function determines sample-wise in which part of a
movement or tap the acc-timeseries is, and defines the
exact moments of finger-raising, finger-lowering, and
the in between stopping moments.
Input:
- x, y, z (arr): all three one-dimensional data-
arrays containing one acc-axis each. Exact
labeling x/y/z is not important. Should have equal
lengths. Typically timeseries from one run.
- fs (int): corresponding sample frequency
- side (string): side where acc-data origin from
Return:
- tapTimes (list of lists): each list contains 4 timestamps
(in seconds from array-start) indicating moments of:
[finger-raise start, finger raise end,
finger-lowering start, finger-lowering end]
- moveTimes, restTimes: idem but then for 'other
movements' and rest periods (of > 1 sec), each list
contains the first and last timestamp of move/rest
period.
"""
# input sanity checks
    if x != [] and y != []:
        assert len(x) == len(y), 'Arrays X and Y should have equal lengths'
    if x != [] and z != []:
        assert len(x) == len(z), 'Arrays X and Z should have equal lengths'
    if z != [] and y != []:
        assert len(y) == len(z), 'Arrays Y and Z should have equal lengths'
    assert side in ['left', 'right'], 'Side should be left or right'
ax_arrs = []
for ax in [x, y, z]:
if ax != []: ax_arrs.append(ax)
# Find axis with most variation
maxVar = np.argmax([variation(arr) for arr in ax_arrs])
# maxRMS = np.argmax([sum(arr) for arr in ax_arrays])
sig = ax_arrs[maxVar] # acc-signal to use
# check data for pos/neg and order of magn
sig = check_PosNeg_and_Order(sig, fs)
# add differential of signal
sigdf = np.diff(sig)
# timestamps from start (in sec)
timeStamps = np.arange(0, len(sig), 1 / fs)
# Thresholds for movement detection
posThr = np.mean(sig)
negThr = -np.mean(sig)
# Find peaks to help movement detection
peaksettings = {
'peak_dist': 0.1,
'cutoff_time': .25,
}
# find relevant positive peaks
posPeaks = find_peaks(
sig,
height=(posThr, np.max(sig)),
distance=fs * .05, # settings[task]['peak_dist']
)[0]
# select Pos-peaks with surrounding >> Pos and Neg Diff
endPeaks = [np.logical_or(
any(sigdf[i -3:i + 3] < np.percentile(sig, 10)),
any(sigdf[i -3:i + 3] > np.percentile(sig, 90))
) for i in posPeaks]
endPeaks = posPeaks[endPeaks]
# delete endPeaks from posPeaks
for i in endPeaks:
idel = np.where(posPeaks == i)
posPeaks = np.delete(posPeaks, idel)
# delete endPeaks which are too close after each other
# by starting with std False before np.diff, the diff-
# scores represent the distance to the previous peak
tooclose = endPeaks[np.append(
np.array(False), np.diff(endPeaks) < (fs / 6))]
for p in tooclose:
i = np.where(endPeaks == p)
endPeaks = np.delete(endPeaks, i)
posPeaks = np.append(posPeaks, p)
# double check endPeaks with np.diff
hop = 3
endP2 = []
for n in np.arange(hop, sig.shape[0]):
if np.logical_and(
any(np.diff(sig)[n - hop:n] > np.percentile(sig, 90)),
any(np.diff(sig)[n- hop:n] < np.percentile(sig, 10))
): # if diff is above extremes within hop-distance
endP2.append(n)
endP2 = list(compress(endP2, np.diff(endP2) > hop))
for p2 in endP2: # add to endPeaks if not containing
if min(abs(p2 - endPeaks)) > 5:
endPeaks = np.append(endPeaks, p2)
smallNeg = find_peaks(
-1 * sig, # convert pos/neg for negative peaks
height=(-.5e-7, abs(np.min(sig)) * .5),
distance=fs * peaksettings['peak_dist'] * .5,
prominence=abs(np.min(sig)) * .05,
# wlen=40,
)[0]
# largeNeg = find_peaks(
# -1 * sig,
# height=abs(np.min(sig)) * .4,
# # first value is min, second is max
# distance=fs * peaksettings['peak_dist'],
# # prominence=np.min(yEpoch) * .1,
# # wlen=40,
# )[0]
# Lists to store collected indices and timestamps
tapi = [] # list to store indices of tap
movei = [] # list to store indices of other move
resti = [] # list to store indices of rest
resttemp = [] # temp-list to collect rest-indices [1st, Last]
starttemp = [np.nan] * 6 # for during detection process
# [startUP, fastestUp, stopUP,
# startDown, fastestDown, stopDown]
tempi = starttemp.copy() # to start process
state = 'lowRest'
# Sample-wise movement detection
for n, y in enumerate(sig[:-1]):
if state == 'otherMov':
# PM LEAVE OUT OTHER-MOV-STATE
if n in endPeaks: # during other Move: end Tap
tempi[-1] = n # finish and store index list
if (tempi[-1] - tempi[0]) > fs * .1:
movei.append(tempi) # save if long enough
state='lowRest'
tempi = starttemp.copy() # after end: start lowRest
continue
try:
next10 = sum([negThr < Y < posThr for Y in sig[range(n, n + int(fs * .2)
)]])
if next10 > (fs * .2) * .8:
# End 'other move' if 8 / 10 next samples are inactive
tempi[-1] = n # END of OTHER MOVE
if (tempi[-1] - tempi[0]) > fs * .1:
movei.append(tempi)
tempi = starttemp.copy() # after end: start lowRest
state = 'lowRest'
except IndexError: # prevent indexerror out of range for next10
# print('end of timeseries')
continue
elif state == 'lowRest':
if np.logical_and(
y > posThr, # if value is over pos-threshold
sigdf[n] > np.percentile(sigdf, 75) # AND diff is over Thr
# any([Y in posPeaks for Y in range(n, n + int(fs * .2))]) # USED IN PAUSED
):
if resttemp: # close and store active rest period
resttemp.append(n) # Add second and last rest-ind
if (resttemp[1] - resttemp[0]) > fs: # if rest > 1 sec
resti.append(resttemp) # add finished rest-indices
resttemp = [] # reset resttemp list
state='upAcc1'
tempi[0] = n # START TIME Tap-UP
# print('save start UP', n)
# elif np.logical_or(
# np.logical_or(n in posPeaks, n in smallNeg[0]),
# ~ (negThr < y < posThr)
# ):
# if resttemp: # close and store active rest period
# resttemp.append(n) # Add second and last rest-ind
# if (resttemp[1] - resttemp[0]) > fs: # if rest > 1 sec
# resti.append(resttemp) # add finished rest-indices
# resttemp = [] # reset resttemp list
# state = 'otherMov'
# tempi.append(n) # START TIME Othermovement
elif n in endPeaks: # during lowRest, endPeak found
resttemp.append(n) # Add second and last rest-ind
if (resttemp[1] - resttemp[0]) > fs: # if rest > 1 sec
resti.append(resttemp) # add finished rest-indices
resttemp = [] # reset resttemp list
state='lowRest'
tempi = starttemp.copy() # after end: start lowRest
continue
else: # lowRest stays lowRest
if not resttemp: # if rest-temp list is empty
resttemp.append(n) # start of rest period
elif state == 'upAcc1':
if n in posPeaks:
state='upAcc2'
# acc getting less, veloc still increasing
# print('acc-peakUP detected', n)
elif n in endPeaks:
state = 'downDec2'
# emergency out if endPeak is found
elif state == 'upAcc2':
            if y < 0: # crossing zero-line, start of deceleration
tempi[1] = n # save n as FASTEST MOMENT UP
state='upDec1'
# print('fastest point UP', n)
elif n in endPeaks:
state = 'downDec2'
# emergency out if endPeak is found
elif state=='upDec1':
if n in smallNeg:
state='upDec2'
elif n in endPeaks:
state = 'downDec2'
# emergency out if endPeak is found
elif state == 'upDec2':
if np.logical_or(y > 0, sigdf[n] < 0):
# if acc is pos, or goes into acceleration
# phase of down movement
state='highRest' # end of UP-decell
tempi[2]= n # END OF UP !!!
elif n in endPeaks:
state = 'downDec2'
# emergency out if endPeak is found
elif state == 'highRest':
if np.logical_and(
y < negThr,
sigdf[n] < 0 #np.percentile(sigdf, 25)
# from highRest: LOWERING starts when acc
# gets below negative-threshold AND when
# differential is negative
):
state='downAcc1'
tempi[3] = n # START OF LOWERING
# print('LOWERING START', n)
elif n in endPeaks:
state = 'downDec2'
# emergency out if endPeak is found
# elif state == 'downAcc1':
# if n in largeNeg[0]:
# state='downAcc2'
# elif n - tempi[2] > (fs * peaksettings[task]['cutoff_time']):
# # if down-move takes > defined cutoff time
# state = 'otherMov' # reset to start-state
# movei.append(tempi) # newly added
# tempi = [] # newly added
# elif state == 'downAcc2':
elif state == 'downAcc1':
if np.logical_and(
y > 0,
sigdf[n] > 0
):
                # if acceleration gets positive again and keeps
                # on increasing (sigdf), the downward acceleration
                # is finished -> ADD FASTEST DOWNWARD MOMENT
state='downDec1'
tempi[4] = n
# print('fastest DOWN @', n)
elif n in endPeaks:
state = 'downDec2'
# emergency out if endPeak is found
# elif n - tempi[2] > (fs * peaksettings[task]['cutoff_time']):
# # if down-move takes > defined cutoff time
# state = 'otherMov' # reset to start-state
# movei.append(tempi) # newly added
# tempi = [] # newly added
elif state == 'downDec1':
if n in endPeaks:
state = 'downDec2'
elif state=='downDec2':
if np.logical_or(
y < 0,
sigdf[n] < 0
            ): # after large pos-peak, just before/around impact
                # artefactual peaks
state='lowRest'
tempi[5] = n
# store current indices
tapi.append(tempi)
tempi = starttemp.copy() # restart w/ 6*nan
# drop first tap due to starting time
tapi = tapi[1:]
# convert detected indices-lists into timestamps
tapTimes = [] # list to store timeStamps of tap
# moveTimes = [] # alternative list for movements
    # restTimes = [] # list to store rest-timestamps
    for tap in tapi:
        tapTimes.append(
            [timeStamps[I] for I in tap if not np.isnan(I)]
        )
# for tap in movei: moveTimes.append(timeStamps[tap])
# for tap in resti: restTimes.append(timeStamps[tap])
return tapi, tapTimes, endPeaks | 4,774 |
def parallelize(df, func):
""" Split data into max core partitions and execute func in parallel.
https://www.machinelearningplus.com/python/parallel-processing-python/
Parameters
----------
df : pandas Dataframe
func : any functions
Returns
-------
data : pandas Dataframe
Returned dataframe of func.
"""
cores = cpu_count()
data_split = np.array_split(df, cores)
pool = Pool(cores)
    data = pd.concat(pool.map(func, data_split), ignore_index=True)
pool.close()
pool.join()
return data | 4,775 |
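# Hedged usage sketch for parallelize() above, assuming its module-level imports
# (numpy as np, pandas as pd, multiprocessing Pool / cpu_count) are available.
# _add_total is a hypothetical per-chunk worker; it must live at module level so
# Pool can pickle it.
def _add_total(chunk):
    chunk = chunk.copy()
    chunk["total"] = chunk["a"] + chunk["b"]
    return chunk

if __name__ == "__main__":
    example_df = pd.DataFrame({"a": np.arange(1000), "b": np.arange(1000)})
    combined = parallelize(example_df, _add_total)
    print(combined.shape)  # (1000, 3): the chunks are re-concatenated in order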
def get_functional_groups(alkoxy_mol):
"""
given a molecule object `alkoxy_mol`. This method returns
a dictionary of groups used in the Vereecken SAR with the
key being the group and the value being the number of occurances
it has.
"""
#print 'getting groups from {}'.format(alkoxy_mol.toSMILES())
alkoxy_mol.assignAtomIDs()
labeled_atoms = alkoxy_mol.getLabeledAtoms()
assert labeled_atoms['*1'].symbol == 'C'
assert labeled_atoms['*3'].symbol == 'C', alkoxy_mol.toAdjacencyList() + str(labeled_atoms)
alpha_groups = get_atom_groups(labeled_atoms['*1'])
beta_groups = get_atom_groups(labeled_atoms['*3'])
# find cyclic groups here (after project finished)
all_groups = {}
for label, num in alpha_groups.items():
all_groups['alpha{}'.format(label)] = num
for label, num in beta_groups.items():
all_groups['beta{}'.format(label)] = num
return all_groups | 4,776 |
def rough(material, coverage, scale, det, e0=20.0, withPoisson=True, nTraj=defaultNumTraj, dose=defaultDose, sf=True, bf=True, xtraParams=defaultXtraParams):
"""rough(material, coverage, scale, det, [e0=20.0], [withPoisson=True], [nTraj=defaultNumTraj], [dose = 120.0], [sf=True], [bf=True], [xtraParams={}])
Monte Carlo simulate a spectrum from a rough surface with roughness modeled as square pillars of the specified scale and fractional coverage.
The features are also offset by a randomized x,y offset of size approximately scale to ensure that the beam doesn't always strike at the same sort of a position.
+ material - Composition of material
+ coverage of pillars on surface (0.0 to 1.0 -> 0% to 100%)
    + scale - height and width of pillars"""
tmp = u"MC simulation of a %0.2lg um %d%% coverage rough surface of %s at %0.1f keV%s%s" % (1.0e6 * scale, int(100.0 * coverage), material, e0, (" + CSF" if sf else ""), (" + BSF" if bf else ""))
return base(det, e0, withPoisson, nTraj, dose, sf, bf, tmp, buildRough, { "Scale" : scale, "Coverage" : coverage, "Size" : 1.0e-5, "Material" : material }, xtraParams) | 4,777 |
def test_string_with_correct_type():
"""String type"""
assert val.is_string('test', desc='test') == 'test' | 4,778 |
def jsonify(comment_lower: str) -> str:
"""pyNastran: SPOINT={'id':10, 'xyz':[10.,10.,10.]}"""
sline = comment_lower.split('=')
rhs = sline[1].rstrip()
return rhs.replace("'", '"').replace('}', ',}').replace(',,}', ',}') | 4,779 |
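# Quick sanity check of jsonify() using the docstring's own example (lower-cased,
# as the comment_lower parameter name implies):
print(jsonify("pynastran: spoint={'id':10, 'xyz':[10.,10.,10.]}"))
# -> {"id":10, "xyz":[10.,10.,10.],}
# Note the trailing comma the transformation leaves before '}'; the downstream
# parser is assumed to tolerate it.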
def set_attributes_polling(test_case, device_proxy, device_server, poll_periods):
"""Set attribute polling and restore after test
Parameters
----------
test_case : unittest.TestCase instance
device_proxy : tango.DeviceProxy instance
device_server : tango.Device instance
The instance of the device class `device_proxy` is talking to
poll_periods : dict {"attribute_name" : poll_period}
        `poll_period` in milliseconds as per Tango APIs, 0 or falsy to disable
polling.
Return value
------------
restore_polling : function
This function can be used to restore polling if it is to happen before the end of
the test. Should be idempotent if only one set_attributes_polling() is called per
test.
"""
    # TODO (NM 2016-04-11) check if this is still needed after upgrade to Tango 9.x. For
# some reason it only works if the device_proxy is used to set polling, but the
# device_server is used to clear the polling. If polling is cleared using device_proxy
    # it seems to be impossible to restore the polling afterwards.
attributes = poll_periods.keys()
initial_polling = {
attr: device_proxy.get_attribute_poll_period(attr) for attr in attributes
}
retry_time = 0.5
for attr in attributes:
initial_period = initial_polling[attr]
new_period = poll_periods[attr]
# Disable polling for attributes with poll_period of zero / falsy
        # zero initial_period implies no polling currently configured
if not new_period and initial_period != 0:
LOGGER.debug("not setting polling for {}".format(attr))
device_server.stop_poll_attribute(attr)
else:
# Set the polling
LOGGER.debug("setting polling for {}".format(attr))
try:
device_proxy.poll_attribute(attr, new_period)
# TODO See (NM 2016-04-11) comment below about back-to-back calls
time.sleep(0.05)
except Exception:
retry = True
LOGGER.warning(
"Setting polling of attribute {} in {} due to unhandled"
"exception in poll_attribute command".format(attr, retry_time),
exc_info=True,
)
else:
retry = False
if retry:
time.sleep(retry_time)
device_proxy.poll_attribute(attr, new_period)
def restore_polling():
"""Restore initial polling, for use during cleanup / teardown"""
for attr, period in initial_polling.items():
if period == 0:
continue # zero period implies no polling, nothing to do
try:
device_proxy.poll_attribute(attr, period)
# TODO (NM 2016-04-11) For some reason Tango doesn't seem to handle
# back-to-back calls, and even with the sleep it sometimes goes bad. Need
# to check if this is fixed (and core dumps) when we upgrade to Tango 9.x
time.sleep(0.05)
except Exception:
retry = True
LOGGER.warning(
"retrying restore of attribute {} in {} due to unhandled"
"exception in poll_attribute command".format(attr, retry_time),
exc_info=True,
)
else:
retry = False
if retry:
time.sleep(retry_time)
device_proxy.poll_attribute(attr, period)
test_case.addCleanup(restore_polling)
return restore_polling | 4,780 |
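# Hedged sketch of how set_attributes_polling() might be used from a Tango device
# test. Not runnable stand-alone: it needs a running Tango facility, and the device
# name, attribute names, and the way the fixture exposes the device_server instance
# are assumptions for illustration only.
import unittest
import tango

class ExamplePollingTest(unittest.TestCase):
    def test_with_fast_polling(self):
        proxy = tango.DeviceProxy("sys/tg_test/1")      # assumed device name
        server = self.device_server                     # however the fixture exposes it
        set_attributes_polling(
            self, proxy, server,
            {"double_scalar": 100,    # poll every 100 ms
             "long_scalar": 0},       # disable polling for this attribute
        )
        # ... exercise and assert against the polled attributes here ...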
def test_process_covid_csv_data() -> None:
"""Checks function test_process_covid_csv_data calculates correct data from
set file nation_2021-10-28.csv"""
last7days_cases , current_hospital_cases , total_deaths = \
process_covid_csv_data ( parse_csv_data (
'nation_2021-10-28.csv' ) )
assert last7days_cases == 240_299
assert current_hospital_cases == 7_019
assert total_deaths == 141_544 | 4,781 |
def get_available_engine(
fp16: bool = False, ddp: bool = False, amp: bool = False, apex: bool = False
) -> "IEngine":
"""Returns available engine based on given arguments.
Args:
fp16 (bool): option to use fp16 for training. Default is `False`.
ddp (bool): option to use DDP for training. Default is `False`.
amp (bool): option to use APEX for training. Default is `False`.
apex (bool): option to use APEX for training. Default is `False`.
Returns:
IEngine which match requirements.
"""
from catalyst.engines.torch import (
DataParallelEngine,
DeviceEngine,
DistributedDataParallelEngine,
)
if fp16 and not amp and not apex:
amp = SETTINGS.amp_required or (SETTINGS.amp_required and SETTINGS.apex_required)
apex = SETTINGS.apex_required and (not SETTINGS.amp_required)
if amp:
assert (
SETTINGS.amp_required
), "catalyst[amp] is not available, to install it, run `pip install catalyst[amp]`."
assert not apex, "Could not use both apex and amp engines"
from catalyst.engines.amp import (
AMPEngine,
DataParallelAMPEngine,
DistributedDataParallelAMPEngine,
)
if apex:
assert (
SETTINGS.apex_required
), "catalyst[apex] is not available, to install it, run `pip install catalyst[apex]`."
assert not amp, "Could not use both apex and amp engines"
from catalyst.engines.apex import (
APEXEngine,
DataParallelAPEXEngine,
DistributedDataParallelAPEXEngine,
)
is_multiple_gpus = NUM_CUDA_DEVICES > 1
if not IS_CUDA_AVAILABLE:
return DeviceEngine("cpu")
elif is_multiple_gpus:
if ddp:
if amp:
return DistributedDataParallelAMPEngine()
elif apex:
return DistributedDataParallelAPEXEngine()
else:
return DistributedDataParallelEngine()
else:
if amp:
return DataParallelAMPEngine()
elif apex:
return DataParallelAPEXEngine()
else:
return DataParallelEngine()
else:
if amp:
return AMPEngine()
elif apex:
return APEXEngine()
else:
return DeviceEngine("cuda") | 4,782 |
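# Minimal usage sketch for get_available_engine(), assuming catalyst and torch are
# installed; the concrete engine returned depends on the local CUDA / amp / apex setup.
engine = get_available_engine(fp16=True)        # prefers AMP when it is available
print(type(engine).__name__)                    # e.g. "DeviceEngine" on a CPU-only machine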
def disk_usage(pathname):
"""Return disk usage statistics for the given path"""
### Return tuple with the attributes total,used,free in bytes.
### usage(total=118013599744, used=63686647808, free=48352747520)
return shutil.disk_usage(pathname) | 4,783 |
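# Example: report free space on the root filesystem (pure stdlib, so this runs as-is
# on POSIX; use a drive letter such as "C:\\" on Windows).
total, used, free = disk_usage("/")
print("%.1f GiB free of %.1f GiB" % (free / 2**30, total / 2**30))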
def get_default_log_config():
"""Get the default logging configuration.
Returns:
dict: The default logging configuration.
"""
root = os.path.dirname(__file__)
config_file = os.path.join(root, "logging.yaml")
with open(config_file, "r") as file_object:
data = yaml.load(file_object, yaml.FullLoader)
return data["logging"] | 4,784 |
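# Hedged usage sketch: assuming the packaged logging.yaml follows the stdlib
# dictConfig schema, the returned dict can be fed straight to logging.config.
import logging
import logging.config

logging.config.dictConfig(get_default_log_config())
logging.getLogger(__name__).info("logging configured from the packaged logging.yaml")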
def create_new_deployment(
runner: Runner, deployment_arg: str, expose: PortMapping,
add_custom_nameserver: bool
) -> Tuple[str, str]:
"""
Create a new Deployment, return its name and Kubernetes label.
"""
span = runner.span()
run_id = runner.session_id
runner.show(
"Starting network proxy to cluster using "
"new Deployment {}".format(deployment_arg)
)
def remove_existing_deployment(quiet=False):
if not quiet:
runner.show("Cleaning up Deployment {}".format(deployment_arg))
runner.check_call(
runner.kubectl(
"delete",
"--ignore-not-found",
"svc,deploy",
"--selector=telepresence=" + run_id,
)
)
runner.add_cleanup("Delete new deployment", remove_existing_deployment)
remove_existing_deployment(quiet=True)
command = [
"run", # This will result in using Deployment:
"--restart=Always",
"--limits=cpu=100m,memory=256Mi",
"--requests=cpu=25m,memory=64Mi",
deployment_arg,
"--image=" + get_image_name(expose),
"--labels=telepresence=" + run_id,
]
# Provide a stable argument ordering. Reverse it because that happens to
# make some current tests happy but in the long run that's totally
# arbitrary and doesn't need to be maintained. See issue 494.
for port in sorted(expose.remote(), reverse=True):
command.append("--port={}".format(port))
if expose.remote():
command.append("--expose")
# If we're on local VM we need to use different nameserver to prevent
# infinite loops caused by sshuttle:
if add_custom_nameserver:
command.append(
"--env=TELEPRESENCE_NAMESERVER=" + get_alternate_nameserver()
)
try:
runner.check_call(runner.kubectl(command))
except CalledProcessError as exc:
raise runner.fail(
"Failed to create deployment {}:\n{}".format(
deployment_arg, exc.stderr
)
)
span.end()
return deployment_arg, run_id | 4,785 |
def score_latency(
references, reference_wavs, partial_translations, target_language="en-US"
):
"""Measures the "final" translation lag after all corrections have been made."""
logger = logging.getLogger("evaluation")
tokenizer = get_tokenizer(target_language)
min_len = min(len(partial_translations), len(references))
if len(partial_translations) != len(references):
logger.warning(
f"Found {len(references)} references, {len(partial_translations)} partial "
+ f"translations. Evaluating only the first {min_len}"
)
partial_translations = partial_translations[:min_len]
references = references[:min_len]
# Make case insensitive and tokenize
partial_translations_tokenized = [
[(t_time, tokenizer.tokenize(t.upper())) for t_time, t in transcript]
for transcript in partial_translations
]
references = [tokenizer.tokenize(r.upper()) for r in references]
# Compute total lag
output_words, total_lag = 0, 0
for reference, (_, reference_wav), partial_translation in zip(
references, reference_wavs, partial_translations_tokenized
):
if len(partial_translation) == 0:
continue
final_time, final_translation = partial_translation[-1]
reference_duration = get_duration_seconds(reference_wav)
for j in range(1, len(final_translation) + 1):
# Compare a time a word was finalized in the output
# to the time its corresponding word was uttered
finalization_time = get_finalization_time(
final_translation, j, partial_translation
)
original_token = int(j * len(reference) / len(final_translation))
original_time = get_token_time(
original_token, reference, reference_duration
)
total_lag += max(0, finalization_time - original_time)
output_words += 1
return total_lag / max(1, output_words) | 4,786 |
def stack_analysis_benchmark(queue, threads, stack_analysis, thread_count,
python_payload, maven_payload, npm_payload):
"""Stack analysis benchmark."""
# TODO: read automagically from the filelist
manifests = (
("maven", "clojure_1_6_0.xml"),
("maven", "clojure_1_7_0.xml"),
("maven", "clojure_1_8_0.xml"),
("maven", "clojure_junit.xml"),
("pypi", "click_6_star.txt"),
("pypi", "array_split.txt"),
("pypi", "fastlog_urllib_requests.txt"),
("pypi", "requests_latest.txt"),
("pypi", "numpy_latest.txt"),
("pypi", "flask_latest.txt"),
("pypi", "scipy_latest.txt"),
("pypi", "pygame_latest.txt"),
("pypi", "pyglet_latest.txt"),
("pypi", "dash_latest.txt"),
("pypi", "pudb_latest.txt"),
("pypi", "pytest_latest.txt"),
("pypi", "numpy_1_11_0.txt"),
("pypi", "numpy_1_12_0.txt"),
("pypi", "numpy_1_16_2.txt"),
("pypi", "numpy_1_16_3.txt"),
("pypi", "numpy_scipy.txt"),
("pypi", "pytest_2_0_0.txt"),
("pypi", "pytest_2_0_1.txt"),
("pypi", "pytest_3_2_2.txt"),
("pypi", "requests_2_20_0.txt"),
("pypi", "requests_2_20_1.txt"),
("pypi", "requests_2_21_0.txt"),
("pypi", "scipy_1_1_0.txt"),
("pypi", "scipy_1_2_0.txt"),
("pypi", "scipy_1_2_1.txt"),
("npm", "array.json"),
("npm", "dependency_array.json"),
("npm", "dependency_emitter_component.json"),
("npm", "dependency_jquery.json"),
("npm", "dependency_jquery_react.json"),
("npm", "dependency_lodash.json"),
("npm", "dependency_lodash_react_jquery.json"),
("npm", "dependency_react.json"),
("npm", "dependency_to_function.json"),
("npm", "dependency_to_function_vue_array.json"),
("npm", "dependency_underscore.json"),
("npm", "dependency_underscore_react_jquery.json"),
("npm", "dependency_vue.json"),
("npm", "dependency_vue_to_function.json"),
("npm", "empty.json"),
("npm", "jquery.json"),
("npm", "lodash.json"),
("npm", "mocha.json"),
("npm", "no_requirements.json"),
("npm", "underscore.json"),
("npm", "wisp.json"),
)
for t in range(thread_count):
manifest_idx = randint(0, len(manifests) - 1)
manifest = manifests[manifest_idx]
with log.indent():
log.info("Stack analysis")
ecosystem = manifest[0]
manifest_file = manifest[1]
t = Thread(target=stack_analysis.start,
args=(t, ecosystem, manifest_file, queue))
t.start()
threads.append(t) | 4,787 |
def shapelet_with_w_term(
coords, frequency, coeffs, beta, delta_lm, lm, dtype=np.complex128
):
"""
    shapelet: outputs visibilities corresponding to those of a shapelet
Inputs:
coords: coordinates in (u,v) space with shape (nrow, 3)
frequency: frequency values with shape (nchan,)
        coeffs: shapelet coefficients with shape (nsrc, nmax1, nmax2),
            where coeffs[3, 4] = coeffs_l[3] * coeffs_m[4]
beta: characteristic shapelet size with shape (nsrc, 2)
delta_l: pixel size in l dim
delta_m: pixel size in m dim
lm: source center coordinates of shape (nsource, 2)
Returns:
out_shapelets: Shapelet with shape (nrow, nchan, nsrc)
"""
nrow = coords.shape[0]
nsrc = coeffs.shape[0]
nchan = frequency.shape[0]
    out_shapelets = np.empty((nrow, nchan, nsrc), dtype=dtype)
delta_l, delta_m = delta_lm
for row in range(nrow):
u, v, w = coords[row, :]
for chan in range(nchan):
fu = u * 2 * np.pi * frequency[chan] / lightspeed
fv = v * 2 * np.pi * frequency[chan] / lightspeed
for src in range(nsrc):
nmax1, nmax2 = coeffs[src, :, :].shape
beta_u, beta_v = beta[src, :]
l, m = lm[src, :]
if beta_u == 0 or beta_v == 0:
out_shapelets[row, chan, src] = 1
continue
tmp_shapelet = 0 + 0j
for n1 in range(nmax1):
for n2 in range(nmax2):
tmp_shapelet += (
0
if coeffs[src][n1, n2] == 0
else coeffs[src][n1, n2]
* basis_function(
n1, fu, beta_u, True, delta_x=delta_l
)
* basis_function(
n2, fv, beta_v, True, delta_x=delta_m
)
)
w_term = phase_steer_and_w_correct(
(u, v, w), (l, m), frequency[chan]
)
out_shapelets[row, chan, src] = tmp_shapelet * w_term
return out_shapelets | 4,788 |
async def mocktext(ctx, *texts):
"""Converts input into a mocking sentence"""
sentence = " ".join(texts[:])
    sentence = sentence.lower()  # assign the result; the bare call was a no-op
msg = "".join(choice((str.upper, str.lower))(c) for c in sentence)
await ctx.send(msg) | 4,789 |
def test_person__DeletePersonForm__2(person_data, browser):
"""`DeletePersonForm` can be cancelled."""
browser.login('editor')
browser.open(browser.PERSON_DELETE_URL)
browser.getControl('No, cancel').click()
assert 'Deletion canceled.' == browser.message
assert browser.PERSON_EDIT_URL == browser.url | 4,790 |
def renderLayerPostProcess(q=1,ki=1,sn="string"):
"""
http://help.autodesk.com/cloudhelp/2019/ENU/Maya-Tech-Docs/CommandsPython/renderLayerPostProcess.html
-----------------------------------------
renderLayerPostProcess is NOT undoable, queryable, and NOT editable.
    Post process the results when rendering is done. Presently this generates
a layered PSD file using individual iff files.
-----------------------------------------
Return Value:
None
In query mode, return type is based on queried flag.
-----------------------------------------
Flags:
-----------------------------------------
ki : keepImages [boolean] ['query']
When set to on, the original iff images are kept after the conversion to PSD. Default is to remove them.
-----------------------------------------
sn : sceneName [string]
Specifies the scene name for interactive batch rendering.
""" | 4,791 |
def CylindricalVectorsToCartesian(coordinates, data):
"""
    Project the supplied cylindrical (r-phi-z) vectors in `data` to 3D Cartesian
    (x-y-z). The `coordinates` must be in Cartesian.
"""
if optimise.DebuggingEnabled():
assert(len(coordinates) == len(data))
for i, coord in enumerate(coordinates):
assert(len(coord) == 3)
assert(len(data[i]) == 3)
newData = numpy.empty((len(data), 3))
for i, coord in enumerate(coordinates):
datum = data[i]
rMag = L2Norm(coord[:2])
x = [coord[0] / rMag, -coord[1] / rMag]
y = [-x[1], x[0]]
newData[i, :] = [datum[0] * x[0] + datum[1] * x[1], datum[0] * y[0] + datum[1] * y[1], datum[2]]
return newData | 4,792 |
def _entropy_counter2(arr):
"""
calculate the base 2 entropy of the distribution given in `arr` using a
`Counter` and the `values` method (for python3)
"""
arr_len = len(arr)
if arr_len == 0:
return 0
log_arr_len = np.log2(len(arr))
return -sum(val * (np.log2(val) - log_arr_len)
for val in Counter(arr).values()) / arr_len | 4,793 |
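# Quick sanity checks for _entropy_counter2(), assuming numpy as np and
# collections.Counter are imported at module level (the function body needs both):
print(_entropy_counter2(list("aabb")))   # 1.0  -> two equally likely symbols = 1 bit
print(_entropy_counter2(list("aaaa")))   # ~0.0 -> a constant sequence carries no entropy
print(_entropy_counter2([]))             # 0    -> empty input is handled explicitly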
def update_deleted_strain(_, strain_to_del):
"""Update ``deleted-strain`` var.
This happens after a user clicks the OK btn in the confirm strain
deletion modal.
We also delete the files associated with the strain at this step.
:param _: User clicked the OK btn
:param strain_to_del: Strain corresponding to del btn user clicked
:type strain_to_del: str
"""
remove(path.join(USER_DATA_DIR, strain_to_del + ".gvf"))
rmtree(path.join(USER_SURVEILLANCE_REPORTS_DIR, strain_to_del))
return strain_to_del | 4,794 |
def get_changes_between_models(model1, model2, excludes=None):
"""
Return a dict of differences between two model instances
"""
if excludes is None:
excludes = []
changes = {}
for field in model1._meta.fields:
if (isinstance(field, (fields.AutoField,
fields.related.RelatedField))
or field.name in excludes):
continue
if field.value_from_object(model1) != field.value_from_object(model2):
changes[field.verbose_name] = (field.value_from_object(model1),
field.value_from_object(model2))
return changes | 4,795 |
def write_transaction(connection, signed_transaction):
"""Write a transaction to the backlog table.
Args:
signed_transaction (dict): a signed transaction.
Returns:
The result of the operation.
"""
raise NotImplementedError | 4,796 |
def get_intersect(x1, y1, x2, y2):
"""
Returns the point of intersection of the lines or None if lines are parallel
Ex. p1=(x1,x2)... line_intersection((p1,p2), (p3,p4))
a1: [x, y] a point on the first line
a2: [x, y] another point on the first line
b1: [x, y] a point on the second line
b2: [x, y] another point on the second line
"""
s = np.vstack([x1, y1, x2, y2]) # s for stacked
h = np.hstack((s, np.ones((4, 1)))) # h for homogeneous
l1 = np.cross(h[0], h[1]) # get first line
l2 = np.cross(h[2], h[3]) # get second line
x, y, z = np.cross(l1, l2) # point of intersection
if z == 0: # lines are parallel
return None, None
return x / z, y / z | 4,797 |
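# Quick sanity checks for get_intersect(), assuming numpy is imported as np
# (the function body requires it):
print(get_intersect((0, 0), (1, 1), (0, 1), (1, 0)))   # -> (0.5, 0.5): the unit square's diagonals cross in the middle
print(get_intersect((0, 0), (1, 0), (0, 1), (1, 1)))   # -> (None, None): parallel horizontal lines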
def removeDuplicateColumns(df):
"""
Removes columns that have a duplicate name.
:return pd.DataFrame:
"""
duplicates = getDuplicates(df.columns)
done = False
idx = 0
df_result = df.copy()
additions_dict = {}
while not done:
if idx >= len(df_result.columns):
done = True
break
column = df_result.columns[idx]
if column in duplicates:
df1 = df_result[column]
values = df1.iloc[:,1]
del df_result[column]
duplicates.remove(column)
additions_dict[column] = values
else:
idx += 1
df_add = pd.DataFrame(additions_dict)
df_result = pd.concat([df_result, df_add], axis=1, sort=True)
return df_result | 4,798 |
def load_files(file_path_smh, file_path_ic, datastruct):
"""Load the files by asking the controller the filepaths smh and ic provided by the user and store the appropriate
data in the data structure object (dictionary). Before storing, the data are verified"""
dm3_meta_smh = dm3_lib.DM3(file_path_smh)
dm3_meta_ic = dm3_lib.DM3(file_path_ic)
verify_i(dm3_meta_smh.imagedata, dm3_meta_ic.imagedata)
pixel_smh = dm3_meta_smh.pxsize
pixel_ic = dm3_meta_ic.pxsize
verify_p(pixel_smh[0], pixel_ic[0])
verify_p_unit(pixel_smh[1].decode("ascii"), pixel_ic[1].decode("ascii"))
data.SMGData.store(datastruct, 'ISMHexp', dm3_meta_smh.imagedata)
data.SMGData.store(datastruct, 'p', pixel_smh[0])
data.SMGData.store(datastruct, 'ICref', dm3_meta_ic.imagedata)
data.SMGData.store(datastruct, 'pref', pixel_ic[0])
print('Files loaded')
print('Pixel size SMH: ', pixel_smh[0], 'nm')
print('Pixel size Reference: ', pixel_ic[0], 'nm') | 4,799 |