content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
---|---|
def parse_args():
"""Parse the arguments."""
parser = argparse.ArgumentParser(
"dashboard", description="Data Visualization for the simulation outcome"
)
parser.add_argument(
"--datadir",
type=str,
required=True,
help="The path to the simulation data folder.",
)
parser.add_argument(
"--env_name",
type=str,
default=None,
help="The name of the environment to create.",
)
arguments = parser.parse_args()
return arguments | 4,400 |
def running(outputlabel):
"""
Print a new message to stdout with the tag "Running".
Parameters:
outputlabel - Required: Message to be printed (str)
"""
print("[ "+'\033[0;37m'+"RUNNING "+'\033[0;39m'+"] " + outputlabel, end="\r", flush=True)
logging.info(outputlabel) | 4,401 |
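# Hedged usage sketch for the running() helper above: the ANSI escape codes
# colour the "RUNNING" tag and end="\r" lets the next call overwrite the line.
# Assumes `logging` is imported and configured at module level, as running() requires.
import logging
logging.basicConfig(level=logging.INFO)
running("step 1 of 3: downloading data")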
def test_build_dynamic__with_location_mobility_data(monkeypatch):
"""
Ensure dynamic mixing matrix can use location-based mobility data set by the user + Google.
"""
def get_fake_mobility_data(*args, **kwargs):
vals = {"work": [1, 1.5, 1.3, 1.1]}
days = [0, 1, 2, 3]
return vals, days
monkeypatch.setattr(mobility, "get_mobility_data", get_fake_mobility_data)
#monkeypatch.setattr(location_adjuster, "get_country_mixing_matrix", _get_country_mixing_matrix)
mobility_params = {
"mixing": {
"school": {
"append": False,
"times": get_date_from_base([0, 1, 2, 3]),
"values": [1, 0.5, 0.3, 0.1],
}
},
"age_mixing": None,
"microdistancing": {},
"square_mobility_effect": False,
**UNTESTED_PARAMS,
}
mm_func = build_dynamic_mixing_matrix(
base_matrices=MIXING_MATRICES,
country=Country(iso3="AUS"),
mobility=Mobility(**mobility_params),
)
mm = mm_func(0)
assert_allclose(mm, MM, atol=0.01, verbose=True)
mm = mm_func(2)
expected_mm = MM.copy() + (0.3 - 1) * SCHOOL_MM + (1.3 - 1) * WORK_MM
assert_allclose(mm, expected_mm, atol=0.01, verbose=True) | 4,402 |
def margin_to_brightness(margin, max_lead=30, pct_pts_base=0):
""""Tweak max_lead and pct_pts_base to get the desired brightness range"""
return int((abs(margin) / max_lead) * 100) + pct_pts_base | 4,403 |
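# Worked example for margin_to_brightness() above (values are illustrative):
# a 15-point margin with max_lead=30 maps to int((15 / 30) * 100) + 0 = 50.
assert margin_to_brightness(15) == 50
assert margin_to_brightness(-30) == 100   # abs() makes the sign irrelevant
assert margin_to_brightness(6, max_lead=30, pct_pts_base=20) == 40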
def RotateFullImage2D(src, dst, angle, scale=1.0,
interp=InterpolationType.linear):
"""\
Rotate an image resizing the output to fit all the pixels.
Rotates an image clockwise by a given angle (in degrees). The values of
unknown pixels in the output image are set to 0. The output Image is
guaranteed to contain all the pixels of the rotated image. Thus, its
dimensions can be different from those of the input one. An optional scale
parameter can be provided: if set, the image will also be scaled.
:param src: source image
:param dst: destination image
:param angle: the rotation angle in degrees
:param scale: scaling factor
:param interp: InterpolationType to be used
:return: None
"""
return _ecvl.RotateFullImage2D(src, dst, angle, scale, interp) | 4,404 |
def steer(robots, epsilon):
"""Steer towards vrand but only as much as allowed by the dynamics
Input arguments:
robots = robot classes
epsilon = maximum allowed distance traveled
"""
#find minTimes for nearest nodes of all robots
minTimes = []
for r in range(0, len(robots)):
nearestNodeIdx = robots[r].nearestNodeIdx
graphDict = robots[r].graph.nodes[nearestNodeIdx]
vnearest = list(graphDict.values())
nearestTime = np.asarray(vnearest[1])
minTimes.append(nearestTime)
#steer towards vrand
for r in range(0, len(robots)):
nearestNodeIdx = robots[r].nearestNodeIdx
graphDict = robots[r].graph.nodes[nearestNodeIdx]
vnearest = list(graphDict.values())
vrand = robots[r].vrand
nearestTime = np.asarray(vnearest[1])
nearestNode = np.asarray(vnearest[0])
dist = vrand - nearestNode
normDist = np.sqrt(np.sum((nearestNode - vrand)**2))
s = min(epsilon,normDist)
travelTime = s/robots[r].uMax
deltaTcost = travelTime - (nearestTime - min(minTimes))
if deltaTcost > 0:
vnew = np.around(nearestNode + robots[r].uMax*deltaTcost*dist/normDist)
distVnew = np.sqrt(np.sum((nearestNode - vnew)**2))
travelTimeVnew = distVnew/robots[r].uMax
else:
vnew = nearestNode
travelTimeVnew = 0
totalTimeVnew = travelTimeVnew + nearestTime
discretization = robots[r].discretization
if not (0 <= vnew[0] < discretization[0] and 0 <= vnew[1] < discretization[1]):
if vnew[0] >= discretization[0]:
vnew[0] = discretization[0]-1
if vnew[1] >= discretization[1]:
vnew[1] = discretization[1]-1
if vnew[0] < 0:
vnew[0] = 0
if vnew[1] < 0:
vnew[1] = 0
information = getInformationGain(robots[r], vnew)
robots[r].vnew = np.around(vnew)
robots[r].vnewCost = round(travelTimeVnew,1)
robots[r].totalTime = round(totalTimeVnew,1)
robots[r].vnewInformation = information | 4,405 |
def render_practice_text_field_validation1(request):
"""テキストフィールドのバリデーションの練習"""
template = loader.get_template(
'webapp1/practice/vuetify-text-field-validation1.html')
# ----------------------------------------------------
# 1. Fetches host1/webapp1/templates/webapp1/practice/vuetify-text-field-validation1.html.
# ----------------------------------------------------
context = {
}
return HttpResponse(template.render(context, request)) | 4,406 |
def cpsf_critical(request):
"""
Render the cpsf_critical page; handles file upload and allows the user to spawn off the update task
"""
if 'project' not in request.session:
return HttpResponseRedirect(reverse('index'))
transcription_location = os.path.join(settings.ESTORIA_BASE_LOCATION,
request.session['project'],
'transcriptions',
'criticalXML')
return _upload_and_process_xml(request, cpsf_critical_xml, transcription_location,
'estoria_app/cpsf_critical.html',
'CPSF Critical') | 4,407 |
def print_relevant_docs(template: str, info: Info) -> None:
"""Print relevant docs."""
data = DATA[template]
print()
print("**************************")
print()
print()
print(f"{data['title']} code has been generated")
print()
if info.files_added:
print("Added the following files:")
for file in info.files_added:
print(f"- {file}")
print()
if info.tests_added:
print("Added the following tests:")
for file in info.tests_added:
print(f"- {file}")
print()
if info.examples_added:
print(
"Because some files already existed, we added the following example files. Please copy the relevant code to the existing files."
)
for file in info.examples_added:
print(f"- {file}")
print()
print(
f"The next step is to look at the files and deal with all areas marked as TODO."
)
if "extra" in data:
print(data["extra"]) | 4,408 |
def hlc3(high, low, close, offset=None, **kwargs):
"""Indicator: HLC3"""
# Validate Arguments
high = verify_series(high)
low = verify_series(low)
close = verify_series(close)
offset = get_offset(offset)
# Calculate Result
hlc3 = (high + low + close) / 3.0
# Offset
if offset != 0:
hlc3 = hlc3.shift(offset)
# Name & Category
hlc3.name = "HLC3"
hlc3.category = "overlap"
return hlc3 | 4,409 |
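# Minimal standalone sketch of the same HLC3 (typical price) calculation using
# plain pandas, without the library's verify_series/get_offset helpers.
import pandas as pd

high = pd.Series([10.0, 11.0, 12.0])
low = pd.Series([9.0, 9.5, 10.0])
close = pd.Series([9.5, 10.5, 11.5])
typical_price = (high + low + close) / 3.0   # one value per bar
print(typical_price.round(2).tolist())       # [9.5, 10.33, 11.17]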
def p_attritem(p):
"""attritem : sname
| binfunc"""
p[0] = p[1] | 4,410 |
async def findID(context: Context, dueDateID: str = ""):
"""Find Due date !ass find subject_name """
if not dueDateID:
return await notEnoughArgs(context)
try:
dueDates = DueDateData().findById(context, dueDateID)
if len(dueDates) == 0:
return await context.send(Helper.talkLikeABot(f"There is no due date id as : {dueDateID}"))
return await context.send(Helper.talkDueDateAsBot(dueDates))
except Exception as e:
return await context.send(e) | 4,411 |
def expect_ref(ref_port):
"""Expect the port to be a reference."""
r = ref_port
data_type = r.get_data_type()
if (not isinstance(r, RefPort)) or len(data_type) == 0 or data_type[0] != 'Ref':
raise InvalidPortException('Expected ' + r.get_name() + ' to be a reference') | 4,412 |
def front_page() -> HTML:
"""
Renders the front page
"""
return render_template("frontPage.html") | 4,413 |
async def async_setup_entry(hass, entry):
"""Set up the Samsung TV platform."""
# Initialize bridge
data = entry.data.copy()
bridge = _async_get_device_bridge(data)
if bridge.port is None and bridge.default_port is not None:
# For backward compat, set default port for websocket tv
data[CONF_PORT] = bridge.default_port
hass.config_entries.async_update_entry(entry, data=data)
bridge = _async_get_device_bridge(data)
def stop_bridge(event):
"""Stop SamsungTV bridge connection."""
bridge.stop()
entry.async_on_unload(
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_bridge)
)
hass.data[DOMAIN][entry.entry_id] = bridge
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True | 4,414 |
def do_OP_2SWAP(stack):
"""
>>> s = [1, 2, 3, 4]
>>> do_OP_2SWAP(s)
>>> print(s)
[3, 4, 1, 2]
"""
stack.append(stack.pop(-4))
stack.append(stack.pop(-4)) | 4,415 |
def bring_contact_bonus_list(pb_client, obj_pb_ids, arm_pb_id, table_pb_id):
""" For some bring goals, may be useful to also satisfy an object touching table and
not touching arm condition. """
correct_contacts = []
for o in obj_pb_ids:
o2ee_contact = len(pb_client.getContactPoints(o, arm_pb_id)) > 0
o2t_contact = len(pb_client.getContactPoints(o, table_pb_id)) > 0
correct_contacts.append(not o2ee_contact and o2t_contact)
return correct_contacts | 4,416 |
def test_daily_hour_pairs_are_incorrect():
""" test if in one day a hour pairs are incorrect (end hour is less than start hour) """
calculate_payment = CalculatePayment(line="ANA=MO16:00-12:00", idx=0)
hours_worked = calculate_payment.get_hours_worked()
assert calculate_payment.get_daily_hour_pairs_worked(hours_worked=hours_worked) == {
"MO": [[0.0, 0.0]]
} | 4,417 |
def topic(**kwargs):
"""
:param to: Topic ID
:return:
"""
return api_request('topic', kwargs) | 4,418 |
def test_create_below_86_km_layers_boundary_altitudes() -> None:
"""
Produces correct results.
We test the computation of the atmospheric variables (pressure,
temperature and mass density) at the level altitudes, i.e. at the model
layer boundaries. We assert correctness by comparing their values with the
values from the table 1 of the U.S. Standard Atmosphere 1976 document.
"""
z = to_altitude(H)
ds = create(z=z, variables=["p", "t", "rho"])
level_temperature = (
np.array([288.15, 216.65, 216.65, 228.65, 270.65, 270.65, 214.65, 186.87])
* ureg.K
)
level_pressure = (
np.array([101325.0, 22632.0, 5474.8, 868.01, 110.90, 66.938, 3.9564, 0.37338])
* ureg.Pa
)
level_mass_density = (
np.array(
[
1.225,
0.36392,
0.088035,
0.013225,
0.0014275,
0.00086160,
0.000064261,
0.000006958,
]
)
* ureg.kg
/ ureg.m ** 3
)
assert np.allclose(to_quantity(ds.t), level_temperature, rtol=1e-4)
assert np.allclose(to_quantity(ds.p), level_pressure, rtol=1e-4)
assert np.allclose(to_quantity(ds.rho), level_mass_density, rtol=1e-3) | 4,419 |
def load_data(filename: str) -> pd.DataFrame:
"""
Load city daily temperature dataset and preprocess data.
Parameters
----------
filename: str
Path to city daily temperature dataset
Returns
-------
Design matrix and response vector (Temp)
"""
daily_temp_df = pd.read_csv(filename, parse_dates={'DayOfYear': ['Date']})
daily_temp_df = clean_data(daily_temp_df)
return daily_temp_df | 4,420 |
def group_intents(input_file, intent_file, slot_file):
"""
Groups the dataset based on the intents and returns it.
Args:
input_file : The path to the input file
intent_file : The path to the intent file
slot_file : The path to the slot file
Returns:
A dict mapping intents to a list of tuples. Each tuple contains an
input sentence and its corresponding slots for that intent.
"""
intent_groups = defaultdict(list)
with open(input_file, 'r') as input_fd, \
open(intent_file, 'r') as intent_fd, \
open(slot_file, 'r') as slot_fd:
for ip, intent, slot in zip(input_fd, intent_fd, slot_fd):
ip, intent, slot = ip.rstrip(), intent.rstrip(), slot.rstrip()
intent_groups[intent].append((ip, slot))
return intent_groups | 4,421 |
def _get_path(string): # gets file path from variable name
"""
Gets the path that a variable holds, converts it to start from root (.),
resolves any symbolic links, and returns the converted path.
"""
varname = string.replace("(long)", "")
try:
path = c.VAR_STACK[varname]
except KeyError:
if c.verbose:
print "[-] ERROR: {0} is not a variable holding path".format(varname)
return 1
path = _res_path(path)
return _abspath(path) | 4,422 |
def create_service_endpoint(service_endpoint_type, authorization_scheme, name,
github_access_token=None, github_url=None,
azure_rm_tenant_id=None, azure_rm_service_principal_id=None,
azure_rm_service_prinicipal_key=None, azure_rm_subscription_id=None,
azure_rm_subscription_name=None, organization=None,
project=None, detect=None):
"""Create a service endpoint
:param service_endpoint_type: Type of service endpoint
:type service_endpoint_type: str
:param name: Name of service endpoint to create
:type name: str
:param authorization_scheme: Authorization to be used in service endpoint creation
Github service endpoint supports PersonalAccessToken
AzureRm service endpoint supports ServicePrincipal
:type authorization_scheme: str
:param github_access_token: PAT token of github for creating github service endpoint
:type github_access_token: str
:param github_url: Url for github for creating service endpoint
:type github_url: str
:param azure_rm_tenant_id: tenant id for creating azure rm service endpoint
:type azure_rm_tenant_id: str
:param azure_rm_service_principal_id: service principal id for creating azure rm service endpoint
:type azure_rm_service_principal_id: str
:param azure_rm_service_prinicipal_key: key/password for service principal used to create azure rm service endpoint
:type azure_rm_service_prinicipal_key: str
:param azure_rm_subscription_id: subscription id for azure rm service endpoint
:type azure_rm_subscription_id: str
:param azure_rm_subscription_name: name of azure subscription for azure rm service endpoint
:type azure_rm_subscription_name: str
:param organization: Azure Devops organization URL. Example: https://dev.azure.com/MyOrganizationName/
:type organization: str
:param project: Name or ID of the project.
:type project: str
:param detect: Automatically detect organization. Default is "on".
:type detect: str
:rtype: :class:`ServiceEndpoint <service_endpoint.v4_1.models.ServiceEndpoint>`
"""
try:
organization, project = resolve_instance_and_project(detect=detect,
organization=organization,
project=project)
client = get_service_endpoint_client(organization)
if (service_endpoint_type == SERVICE_ENDPOINT_TYPE_GITHUB and
authorization_scheme == SERVICE_ENDPOINT_AUTHORIZATION_PERSONAL_ACCESS_TOKEN):
service_endpoint_authorization = EndpointAuthorization(
parameters={'accessToken': github_access_token},
scheme=SERVICE_ENDPOINT_AUTHORIZATION_PERSONAL_ACCESS_TOKEN)
service_endpoint_to_create = ServiceEndpoint(
authorization=service_endpoint_authorization,
name=name, type=SERVICE_ENDPOINT_TYPE_GITHUB, url=github_url)
return client.create_service_endpoint(service_endpoint_to_create, project)
if (service_endpoint_type == SERVICE_ENDPOINT_TYPE_AZURE_RM and
authorization_scheme == SERVICE_ENDPOINT_AUTHORIZATION_SERVICE_PRINCIPAL):
service_endpoint_authorization = EndpointAuthorization(
parameters={'tenantid': azure_rm_tenant_id,
'serviceprincipalid': azure_rm_service_principal_id,
'authenticationType': 'spnKey',
'serviceprincipalkey': azure_rm_service_prinicipal_key},
scheme=SERVICE_ENDPOINT_AUTHORIZATION_SERVICE_PRINCIPAL)
service_endpoint_data = {
'subscriptionId': azure_rm_subscription_id,
'subscriptionName': azure_rm_subscription_name,
'environment': 'AzureCloud',
'creationMode': 'Manual'
}
service_endpoint_to_create = ServiceEndpoint(
authorization=service_endpoint_authorization, data=service_endpoint_data,
name=name, type=SERVICE_ENDPOINT_TYPE_AZURE_RM, url='https://management.azure.com/')
return client.create_service_endpoint(service_endpoint_to_create, project)
raise CLIError('This combination of endpoint type is not supported with this authorization scheme.')
except VstsServiceError as ex:
raise CLIError(ex) | 4,423 |
def voronoi_diagram_interpolation(interpolationcellid, id0, id1, voronoiDataset0,
voronoiDataset1, centerlines, step,
clippingPoints):
"""Given two Voronoi datasets interpolate the data sets along the centerline.
Args:
interpolationcellid (int): LineID of the centerline
id0 (int): Start ID.
id1 (int): Stop ID.
voronoiDataset0 (vtkPolyData): First Voronoi dataset.
voronoiDataset1 (vtkPolyData): Second Voronoi dataset.
centerlines (vtkPolyData): Centerline to interpolate along.
step (int): Direction to interpolate
clippingPoints (vtkPoints): Location of clipping points.
Returns:
finalNewVoronoiPoints (vtkPoints): New points to the Voronoi diagram.
finalRadiusArray (vtkDoubleArray): Array to hold the radius for each point.
"""
cellLine = extract_single_line(centerlines, interpolationcellid)
startPoint = clippingPoints.GetPoint(id0)
endPoint = clippingPoints.GetPoint(id1)
startId = cellLine.FindPoint(startPoint)
endId = cellLine.FindPoint(endPoint)
gapStartId = startId + 1 * step
gapEndId = endId - 1 * step
arrivalId = gapEndId + 1 * step
endSavingInterval = gapEndId + 1 * step
numberOfGapPoints = int(math.fabs(gapEndId - gapStartId)) + 1
numberOfInterpolationPoints = voronoiDataset0.GetNumberOfPoints()
numberOfCenterlinesPoints = cellLine.GetNumberOfPoints()
numberOfAddedPoints = numberOfGapPoints * numberOfInterpolationPoints
finalNewVoronoiPoints = vtk.vtkPoints()
cellArray = vtk.vtkCellArray()
finalRadiusArray = get_vtk_array(radiusArrayName, 1, numberOfAddedPoints)
count = 0
for i in range(numberOfInterpolationPoints):
voronoiPoint = voronoiDataset0.GetPoint(i)
voronoiPointRadius = voronoiDataset0.GetPointData().GetArray(radiusArrayName).GetTuple1(i)
centerlinePointLocator = get_vtk_point_locator(cellLine)
closestPointId = centerlinePointLocator.FindClosestPoint(voronoiPoint)
closestPoint = cellLine.GetPoint(closestPointId)
voronoiVector = [0.0, 0.0, 0.0]
voronoiVector[0] = voronoiPoint[0] - closestPoint[0]
voronoiVector[1] = voronoiPoint[1] - closestPoint[1]
voronoiVector[2] = voronoiPoint[2] - closestPoint[2]
voronoiVectorNorm = vtk.vtkMath.Norm(voronoiVector)
rotationAngle = compute_voronoi_vector_to_centerline_angle(closestPointId, voronoiVector, cellLine)
PTPoints = vtk.vtkPoints()
range_step = 1 if closestPointId < arrivalId else -1
for j in range(closestPointId, arrivalId, range_step):
localtangent = [0.0, 0.0, 0.0]
newVoronoiVector = [0.0, 0.0, 0.0]
newVoronoiPoint = [0.0, 0.0, 0.0]
transform = vtk.vtkTransform()
point0 = cellLine.GetPoint(j)
if (j < numberOfCenterlinesPoints - 1):
point1 = [0.0, 0.0, 0.0]
cellLine.GetPoint(j + 1, point1)
localtangent[0] += point1[0] - point0[0]
localtangent[1] += point1[1] - point0[1]
localtangent[2] += point1[2] - point0[2]
if (j > 0):
point2 = [0.0, 0.0, 0.0]
cellLine.GetPoint(j - 1, point2)
localtangent[0] += point0[0] - point2[0]
localtangent[1] += point0[1] - point2[1]
localtangent[2] += point0[2] - point2[2]
localnormal = cellLine.GetPointData().GetArray(parallelTransportNormalsArrayName).GetTuple3(j)
localnormaldot = vtk.vtkMath.Dot(localtangent, localnormal)
localtangent[0] -= localnormaldot * localnormal[0]
localtangent[1] -= localnormaldot * localnormal[1]
localtangent[2] -= localnormaldot * localnormal[2]
vtk.vtkMath.Normalize(localtangent)
transform.RotateWXYZ(rotationAngle, localtangent)
transform.TransformNormal(localnormal, newVoronoiVector)
vtk.vtkMath.Normalize(newVoronoiVector)
newVoronoiPoint[0] = point0[0] + voronoiVectorNorm * newVoronoiVector[0]
newVoronoiPoint[1] = point0[1] + voronoiVectorNorm * newVoronoiVector[1]
newVoronoiPoint[2] = point0[2] + voronoiVectorNorm * newVoronoiVector[2]
PTPoints.InsertNextPoint(newVoronoiPoint)
numberOfPTPoints = PTPoints.GetNumberOfPoints()
lastPTPoint = PTPoints.GetPoint(PTPoints.GetNumberOfPoints() - 1)
voronoiPointLocator = get_vtk_point_locator(voronoiDataset1)
arrivalVoronoiPointId = voronoiPointLocator.FindClosestPoint(lastPTPoint)
arrivalVoronoiPoint = voronoiDataset1.GetPoint(arrivalVoronoiPointId)
arrivalVoronoiPointRadius = voronoiDataset1.GetPointData().GetArray(radiusArrayName).GetTuple1(
arrivalVoronoiPointId)
arrivalCenterlinePointLocator = get_vtk_point_locator(cellLine)
arrivalCenterlineClosestPointId = arrivalCenterlinePointLocator.FindClosestPoint(arrivalVoronoiPoint)
arrivalCenterlineClosestPoint = cellLine.GetPoint(arrivalCenterlineClosestPointId)
arrivalVoronoiVector = [0.0, 0.0, 0.0]
arrivalVoronoiVector[0] = arrivalVoronoiPoint[0] - arrivalCenterlineClosestPoint[0]
arrivalVoronoiVector[1] = arrivalVoronoiPoint[1] - arrivalCenterlineClosestPoint[1]
arrivalVoronoiVector[2] = arrivalVoronoiPoint[2] - arrivalCenterlineClosestPoint[2]
arrivalVoronoiVectorNorm = vtk.vtkMath.Norm(arrivalVoronoiVector)
radiusArray = compute_spline(voronoiPointRadius, arrivalVoronoiPointRadius, numberOfPTPoints)
vectorNormArray = compute_spline(voronoiVectorNorm, arrivalVoronoiVectorNorm, numberOfPTPoints)
pointsToGap = (gapStartId - closestPointId) * step
if pointsToGap < 0 or PTPoints.GetNumberOfPoints() <= pointsToGap:
continue
for k in range(gapStartId, endSavingInterval, step):
ptpoint = PTPoints.GetPoint(pointsToGap)
clpoint = cellLine.GetPoint(k)
vector = [0.0, 0.0, 0.0]
vector[0] = ptpoint[0] - clpoint[0]
vector[1] = ptpoint[1] - clpoint[1]
vector[2] = ptpoint[2] - clpoint[2]
vtk.vtkMath.Normalize(vector)
norm = vectorNormArray.GetTuple1(pointsToGap)
newvector = [0.0, 0.0, 0.0]
newvector[0] = norm * vector[0]
newvector[1] = norm * vector[1]
newvector[2] = norm * vector[2]
newpoint = [0.0, 0.0, 0.0]
newpoint[0] = clpoint[0] + newvector[0]
newpoint[1] = clpoint[1] + newvector[1]
newpoint[2] = clpoint[2] + newvector[2]
finalNewVoronoiPoints.InsertNextPoint(newpoint)
cellArray.InsertNextCell(1)
cellArray.InsertCellPoint(count)
if pointsToGap > 0:
finalRadiusArray.SetTuple1(count, radiusArray.GetTuple1(pointsToGap))
pointsToGap += 1
count += 1
return finalNewVoronoiPoints, finalRadiusArray | 4,424 |
def plot_kde_matrix(df, w, limits=None, colorbar=True, refval=None):
"""
Plot a KDE matrix.
Parameters
----------
df: Pandas Dataframe
The rows are the observations, the columns the variables.
w: np.narray
The corresponding weights.
colorbar: bool
Whether to plot the colorbars or not.
limits: dictionary, optional
Dictionary of the form ``{"name": (lower_limit, upper_limit)}``.
refval: dict, optional
A reference parameter to be shown in the plots (e.g. the
underlying ground truth parameter used to simulate the data
for testing purposes). Default: None.
"""
grid = sns.PairGrid(df, diag_sharey=False)
if limits is None:
limits = {}
default = (None, None)
def off_diagonal(x, y, **kwargs):
df = pd.concat((x, y), axis=1)
plot_kde_2d(df, w,
x.name, y.name,
xmin=limits.get(x.name, default)[0],
xmax=limits.get(x.name, default)[1],
ymin=limits.get(y.name, default)[0],
ymax=limits.get(y.name, default)[1],
ax=plt.gca(), title=False, colorbar=colorbar,
refval=refval)
def scatter(x, y, **kwargs):
alpha = w / w.max()
colors = np.zeros((alpha.size, 4))
colors[:, 3] = alpha
plt.gca().scatter(x, y, color="k")
if refval is not None:
plt.gca().scatter([refval[x.name]], [refval[y.name]], color='C1')
plt.gca().set_xlim(*limits.get(x.name, default))
plt.gca().set_ylim(*limits.get(y.name, default))
def diagonal(x, **kwargs):
df = pd.concat((x,), axis=1)
plot_kde_1d(df, w, x.name,
xmin=limits.get(x.name, default)[0],
xmax=limits.get(x.name, default)[1],
ax=plt.gca(), refval=refval)
grid.map_diag(diagonal)
grid.map_upper(scatter)
grid.map_lower(off_diagonal)
return grid | 4,425 |
def coverage(app_context, server_url, coverage_rule, json):
"""Translation coverage as per coverage rule.
e.g. transtats coverage rhinstaller """
api_obj = ConsumeAPIs(server_url or app_context.server_url) if json \
else TextOutputAPIs(server_url or app_context.server_url)
response = api_obj.rule_coverage(coverage_rule)
if isinstance(response, dict):
app_context.print_r(response) | 4,426 |
def test_html_blocks_extrax_05():
"""
Test case 05: Single line paragraph with double pragmas to start and end document.
"""
# Arrange
source_markdown = """<!-- pyml -->
<!-- pyml -->
this is a paragraph
<!-- pyml -->
<!-- pyml -->"""
expected_tokens = [
"[para(3,1):]",
"[text(3,1):this is a paragraph:]",
"[end-para:::True]",
"[pragma:1:<!-- pyml -->;2:<!-- pyml -->;4:<!-- pyml -->;5:<!-- pyml -->]",
]
expected_gfm = "<p>this is a paragraph</p>"
# Act & Assert
act_and_assert(
source_markdown, expected_gfm, expected_tokens, disable_consistency_checks=True
) | 4,427 |
def rebuild(filename, tag=None, format="gz"):
"""Rebuild the internal timezone info in dateutil/zoneinfo/zoneinfo*tar*
filename is the timezone tarball from ftp.iana.org/tz.
"""
import tempfile, shutil
tmpdir = tempfile.mkdtemp()
zonedir = os.path.join(tmpdir, "zoneinfo")
moduledir = os.path.dirname(__file__)
if tag: tag = "-"+tag
targetname = "zoneinfo%s.tar.%s" % (tag, format)
try:
tf = TarFile.open(filename)
# The "backwards" zone file contains links to other files, so must be
# processed as last
for name in sorted(tf.getnames(),
key=lambda k: k != "backward" and k or "z"):
if not (name.endswith(".sh") or
name.endswith(".tab") or
name == "leapseconds"):
tf.extract(name, tmpdir)
filepath = os.path.join(tmpdir, name)
try:
# zic will return errors for nontz files in the package
# such as the Makefile or README, so check_call cannot
# be used (or at least extra checks would be needed)
call(["zic", "-d", zonedir, filepath])
except OSError as e:
if e.errno == 2:
logging.error(
"Could not find zic. Perhaps you need to install "
"libc-bin or some other package that provides it, "
"or it's not in your PATH?")
raise
tf.close()
target = os.path.join(moduledir, targetname)
for entry in os.listdir(moduledir):
if entry.startswith("zoneinfo") and ".tar." in entry:
os.unlink(os.path.join(moduledir, entry))
tf = TarFile.open(target, "w:%s" % format)
for entry in os.listdir(zonedir):
entrypath = os.path.join(zonedir, entry)
tf.add(entrypath, entry)
tf.close()
finally:
shutil.rmtree(tmpdir) | 4,428 |
def get_root(user_id: Optional[int]) -> str:
"""
Return the absolute path to the current authenticated user's data storage
root directory
:param user_id: current user ID (None if user auth is disabled)
:return: user's data storage path
"""
root = app.config['DATA_FILE_ROOT']
if user_id:
root = os.path.join(root, str(user_id))
return os.path.abspath(os.path.expanduser(root)) | 4,429 |
def download_model(model_id, file_format="json", save=True, path="."):
"""
Download models from BiGG. You can chose to save the file or to return the JSON data.
Parameters
----------
model_id : str
A valid id for a model in BiGG.
file_format : str
If you want to save the file, you can import the model in the following formats:
1. json (JSON format)
2. xml (SBML)
3. xml.gz (SBML compressed)
4. mat (MATLAB)
save : bool
If True, writes the model to a file with the model name (the path can be specified).
path : str
Specifies in which folder the model should be written if *save* is True.
Returns
-------
model : Model
If save is False, it returns the parsed model. If save is True, it saves the model in the requested format.
Raises
------
requests.HTTPError
If the request has failed.
"""
if save:
response = requests.get("http://bigg.ucsd.edu/static/models/%s.%s" % (model_id, file_format), stream=True)
response.raise_for_status()
with open(os.path.join(path, "%s.%s" % (model_id, file_format)), "wb") as model_file:
for block in response.iter_content(1024):
model_file.write(block)
else:
response = requests.get("http://bigg.ucsd.edu/static/models/%s.json" % model_id, stream=True)
response.raise_for_status()
return model_from_dict(response.json()) | 4,430 |
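# Hedged usage sketch (requires network access to bigg.ucsd.edu): with
# save=False the parsed model is returned directly. "e_coli_core" is a small
# model id served by BiGG at the time of writing, used here only as an example.
model = download_model("e_coli_core", save=False)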
def open_expand(file_path, *args, **kwargs):
"""
Allows to use '~' in file_path.
"""
return open(os.path.expanduser(file_path), *args, **kwargs) | 4,431 |
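# Hypothetical usage of open_expand() above: "~" expands to the home directory,
# so this writes to <home>/notes.txt (path chosen only for illustration).
with open_expand("~/notes.txt", "w") as fh:
    fh.write("hello\n")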
def get_app():
"""load API modules and return the WSGI application"""
global get_app, _app, login_manager
_app = Flask(__name__,
instance_relative_config=True,
instance_path=os.environ.get('UKNOW_CONFIG'))
_app.config.from_object(DefaultConfig())
_app.secret_key = 'WTF is this!!' # Should have this to work
login_manager = LoginManager()
login_manager.init_app(_app)
import_all_modules(__file__, __name__)
get_app = lambda: _app
return _app | 4,432 |
def _submit_to_all_logs(log_list, certs_chain):
"""Submits the chain to all logs in log_list and validates SCTs."""
log_id_to_verifier = _map_log_id_to_verifier(log_list)
chain_der = [c.to_der() for c in certs_chain]
raw_scts_for_cert = []
for log_url in log_list.keys():
res = _submit_to_single_log(log_url, chain_der)
if res:
raw_scts_for_cert.append(res)
else:
logging.info("No SCT from log %s", log_url)
validated_scts = []
for raw_sct in raw_scts_for_cert:
key_id = raw_sct.id.key_id
try:
log_id_to_verifier[key_id].verify_sct(raw_sct, certs_chain)
validated_scts.append(raw_sct)
except error.SignatureError as err:
logging.warning(
'Discarding SCT from log_id %s which does not validate: %s',
key_id.encode('hex'), err)
except KeyError as err:
logging.warning('Could not find CT log validator for log_id %s. '
'The log key for this log is probably misconfigured.',
key_id.encode('hex'))
scts_for_cert = [tls_message.encode(proto_sct)
for proto_sct in validated_scts
if proto_sct]
sct_list = client_pb2.SignedCertificateTimestampList()
sct_list.sct_list.extend(scts_for_cert)
return tls_message.encode(sct_list) | 4,433 |
def update(oid, landingZoneProgressItemDetails):
"""
This function updates an existing landingZoneProgressItem in the landingZoneProgressItem list
:param oid: id of the landingZoneProgressItem to update in the landingZoneProgressItem list
:param landingZoneProgressItemDetails: landingZoneProgressItem details to update
:return: updated landingZoneProgressItem
"""
app.logger.debug("landingZoneProgressItem: ")
app.logger.debug(pformat(landingZoneProgressItemDetails))
app.logger.debug(oid)
app.logger.debug(landingZoneProgressItemDetails["id"])
if landingZoneProgressItemDetails["id"] != oid:
abort(400, "Key mismatch in path and body")
# Does the landingZoneProgressItem exist in landingZoneProgressItems?
existing_landingZoneProgressItem = (
db.session.query(LandingZoneProgressItem)
.filter(LandingZoneProgressItem.id == oid)
.one_or_none()
)
# Does landingZoneProgressItem exist?
if existing_landingZoneProgressItem is not None:
schema = LandingZoneProgressItemSchema()
update_landingZoneProgressItem = schema.load(
landingZoneProgressItemDetails, session=db.session
)
update_landingZoneProgressItem.id = oid
db.session.merge(update_landingZoneProgressItem)
db.session.commit()
# return the updated landingZoneProgressItem in the response
data = schema.dump(update_landingZoneProgressItem)
app.logger.debug("landingZoneProgressItem data:")
app.logger.debug(pformat(data))
return data, 200
# otherwise, nope, landingZoneProgressItem doesn't exist, so that's an error
else:
abort(404, "LandingZoneProgressItem not found") | 4,434 |
def create_connection(db_file: str):
"""Create database file."""
conn = None
try:
conn = sqlite3.connect(db_file)
print(sqlite3.version)
except Error as e:
print(e)
return conn | 4,435 |
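# Minimal usage sketch for create_connection() above; "example.db" is a
# hypothetical path. Assumes `import sqlite3` and `from sqlite3 import Error`
# at module level, as the function itself requires.
conn = create_connection("example.db")
if conn is not None:
    conn.close()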
def _simplex_gradient(op, grad_wrt_weight):
"""Register gradient for SimplexInterpolationOp."""
grad_wrt_input = simplex_gradient(
input=op.inputs[0],
weight=op.outputs[0],
grad_wrt_weight=grad_wrt_weight,
lattice_sizes=op.get_attr('lattice_sizes'))
return [grad_wrt_input] | 4,436 |
def assert_equal(actual: Type[numpy.float64], desired: numpy.dtype):
"""
usage.scipy: 1
"""
... | 4,437 |
def generate_experiment():
"""
Generate elastic scattering experiments which are reasonable but random
"""
exp_dict = {}
exp_keys = ['qmin', 'qmax', 'qbin', 'rmin', 'rmax', 'rstep']
exp_ranges = [(0, 1.5), (19., 25.), (.8, .12), (0., 2.5), (30., 50.),
(.005, .015)]
for n, k in enumerate(exp_keys):
exp_dict[k] = rs.uniform(exp_ranges[n][0], exp_ranges[n][1])
exp_dict['sampling'] = rs.choice(['full', 'ns'])
return exp_dict | 4,438 |
def connectDB():
"""function to start the database connection using MongoClient from pymongo and the connection link from .env file path. Using certifi to provide certificate in order to enable the connection
Returns:
Cursor: database white-shark
"""
try:
client = MongoClient(f"{MONGO_URI}", tlsCAFile=ca)
return client["white-shark"]
except Exception:
print("Connection failed") | 4,439 |
def load_dicom(filename):
"""Loads in a given dicom file using a pydicom library
:param filename: a path to the .dcm.gz or .dcm file
:type filename: Union[str, os.path]
:return: pydicom.dataset.FileDataset or pydicom.dicomdir.DicomDir
:raises TypeError: raised if the file extension does not end with .dcm nor .gz
"""
if filename.endswith('.dcm'):
ds = dicom.dcmread(filename)
elif filename.endswith('.gz'):
with gzip.open(filename) as fd:
ds = dicom.dcmread(fd, force=True)
else:
raise TypeError
ds.file_meta.TransferSyntaxUID = dicom.uid.ImplicitVRLittleEndian
return ds | 4,440 |
def test__django_keys_int__float():
""" A float value cannot be interpreted as an int.
"""
keys = DjangoKeys(DJANGOKEYS_ACCESSING_TYPES_ENV_PATH)
with pytest.raises(ValueTypeMismatch):
keys.int("INT_VALUE_FLOAT") | 4,441 |
def load_pytorch_policy(fpath, itr, deterministic=False):
""" Load a pytorch policy saved with Spinning Up Logger."""
fname = osp.join(fpath, 'pyt_save', 'model'+itr+'.pt')
print('\n\nLoading from %s.\n\n'%fname)
model = torch.load(fname)
# make function for producing an action given a single state
def get_action(x):
with torch.no_grad():
x = torch.as_tensor(x, dtype=torch.float32)
if deterministic:
action = model.pi(x)[0].mean.numpy()
else:
action = model.act(x)
return action
return get_action | 4,442 |
def pkl_dependencies(module, delta, existing):
"""peer-keepalive dependency checking.
1. 'destination' is required with all pkl configs.
2. If delta has optional pkl keywords present, then all optional pkl
keywords in existing must be added to delta, otherwise the device cli
will remove those values when the new config string is issued.
3. The desired behavior for this set of properties is to merge changes;
therefore if an optional pkl property exists on the device but not
in the playbook, then that existing property should be retained.
Example:
CLI: peer-keepalive dest 10.1.1.1 source 10.1.1.2 vrf orange
Playbook: {pkl_dest: 10.1.1.1, pkl_vrf: blue}
Result: peer-keepalive dest 10.1.1.1 source 10.1.1.2 vrf blue
"""
pkl_existing = [i for i in existing.keys() if i.startswith("pkl")]
for pkl in pkl_existing:
param = module.params.get(pkl)
if not delta.get(pkl):
if param and param == existing[pkl]:
# delta is missing this param because it's idempotent;
# however another pkl command has changed; therefore
# explicitly add it to delta so that the cli retains it.
delta[pkl] = existing[pkl]
elif param is None and existing[pkl]:
# retain existing pkl commands even if not in playbook
delta[pkl] = existing[pkl] | 4,443 |
def assert_pickle(test, obj, value_to_compare=lambda x: x.__dict__, T=None):
"""
Asserts that an object can be dumped and loaded and still maintain its
value.
Args:
test: Instance of `unittest.TestCase` (for assertions).
obj: Obj to dump and then load.
value_to_compare: (optional) Value to extract from the object to
compare. By default, compares dictionaries.
T: (optional) When pickling template instantiations on scalar types,
pass the scalar type T. This is used because `Expression` is
currently not a serializable type.
"""
metaclass = type(type(obj))
if T == Expression:
# Pickling not enabled for Expression.
return
else:
f = BytesIO()
pickle.dump(obj, f)
f.seek(0)
obj_again = pickle.load(f)
_assert_equal(test, value_to_compare(obj), value_to_compare(obj_again)) | 4,444 |
def make_server(dashboard):
"""
Creates the server by mounting various API endpoints and static file content for the dashboard
Parameters
----------
dashboard : plsexplain.Dashboard
The dashboard instance to server
Returns
-------
FastAPI
The application instance that hosts the dashboard instance.
"""
app = FastAPI()
asset_folder = join(abspath(dirname(dirname(__file__))), "client/dist/images")
app.add_api_route("/api/metadata", get_model_metadata(dashboard), methods=["get"])
app.add_api_route("/api/performance", get_model_performance(dashboard), methods=["get"])
app.add_api_route("/api/model/features", get_feature_importance(dashboard), methods=["get"])
app.add_api_route("/api/model/features/{name:str}", get_feature_profile(dashboard), methods=["get"])
app.add_api_route("/api/dataset", get_dataset(dashboard), methods=["get"])
app.add_api_route("/api/predictions/{index:int}/breakdown", get_prediction_breakdown(dashboard), methods=["get"])
app.add_api_route("/api/predictions/{index}/profile/{feature}", get_prediction_profile(dashboard), methods=["get"])
app.mount("/images", StaticFiles(directory=asset_folder), name="static")
app.add_api_route("/{sub_path:path}", get_client_app, methods=["get"], response_class=HTMLResponse)
return app | 4,445 |
def add_arguments(parser: KGTKArgumentParser):
"""
Parse arguments
"""
parser.add_argument('-i', '--indent', action='count', default=0, help='indentation') | 4,446 |
def vis_mask(checkpoint_path, filename, target_dir, resolution=480):
"""Use a trained PL checkpoint to compute attention mask on given image."""
patch_size = 8
# mlp_dino = DINOSeg(data_path='dummy', write_path='dummy', n_blocks=3)
mlp_dino = DINOSeg.load_from_checkpoint(checkpoint_path).to('cuda:0' if torch.cuda.is_available() else 'cpu')
# This only affects the inference resolution. The output is still 480x480
mlp_dino.set_resolution(resolution)
with torch.no_grad():
if not os.path.exists(target_dir):
os.makedirs(target_dir)
with open(filename, 'rb') as file:
img = Image.open(file)
x = img.convert('RGB')
# Get predictions
x = mlp_dino.transforms(image=np.array(x))['image'].unsqueeze(0).to(mlp_dino.device)
attentions = mlp_dino.dino.get_last_selfattention(x)
nh = attentions.shape[1] # number of head
# we keep only the output patch attention
attentions = attentions[0, :, 0, 1:].reshape(nh, -1)
attentions = attentions.reshape(nh, resolution // patch_size, resolution // patch_size)
attentions = nn.functional.interpolate(attentions.unsqueeze(0),
scale_factor=patch_size, mode="nearest")[0].cpu().numpy()
torchvision.utils.save_image(torchvision.utils.make_grid(x, normalize=True, scale_each=True),
os.path.join(target_dir, 'img.png'))
for j in range(nh):
fname = os.path.join(target_dir, "attn-head-dino" + str(j) + ".png")
plt.imsave(fname=fname, arr=attentions[j], format='png')
print(f"{fname} saved.") | 4,447 |
def f():
"""This is a function docstring."""
pass | 4,448 |
def bash_complete_line(line, return_line=True, **kwargs):
"""Provides the completion from the end of the line.
Parameters
----------
line : str
Line to complete
return_line : bool, optional
If true (default), will return the entire line, with the completion added.
If false, this will instead return the strings to append to the original line.
kwargs : optional
All other keyword arguments are passed to the bash_completions() function.
Returns
-------
rtn : set of str
Possible completions of prefix
"""
# set up for completing from the end of the line
split = line.split()
if len(split) > 1 and not line.endswith(" "):
prefix = split[-1]
begidx = len(line.rsplit(prefix)[0])
else:
prefix = ""
begidx = len(line)
endidx = len(line)
# get completions
out, lprefix = bash_completions(prefix, line, begidx, endidx, **kwargs)
# reformat output
if return_line:
preline = line[:-lprefix]
rtn = {preline + o for o in out}
else:
rtn = {o[lprefix:] for o in out}
return rtn | 4,449 |
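# Illustration of the prefix/begidx/endidx bookkeeping used above, independent
# of the bash_completions() call (the input line is hypothetical).
line = "git chec"
split = line.split()
prefix = split[-1] if len(split) > 1 and not line.endswith(" ") else ""
begidx = len(line.rsplit(prefix)[0]) if prefix else len(line)
endidx = len(line)
print(prefix, begidx, endidx)   # chec 4 8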
def use_board(name):
"""
Use Board.
"""
_init_pins()
return r_eval("pins::use_board(\"" + name + "\")") | 4,450 |
def add_parameter(name, initial_value=1.0, **kwargs):
"""Adds a new global parameter to the model.
:param name: the name for the new global parameter
:type name: str
:param initial_value: optional the initial value of the parameter (defaults to 1)
:type initial_value: float
:param kwargs: optional parameters, recognized are:
* | `model`: to specify the data model to be used (if not specified
| the one from :func:`.get_current_model` will be taken)
* all other parameters from :func:`set_parameters`.
:return: the newly created parameter
"""
dm = kwargs.get('model', model_io.get_current_model())
assert (isinstance(dm, COPASI.CDataModel))
model = dm.getModel()
assert (isinstance(model, COPASI.CModel))
parameter = model.createModelValue(name, initial_value)
if parameter is None:
raise ValueError('A global parameter named ' + name + ' already exists')
set_parameters(name, **kwargs)
return parameter | 4,451 |
def get_sample_libraries(samples, study_tables):
"""
Return libraries for samples.
:param samples: Sample object or a list of Sample objects within a study
:param study_tables: Rendered study tables
:return: GenericMaterial queryset
"""
from samplesheets.models import GenericMaterial
if type(samples) not in [list, QuerySet]:
samples = [samples]
sample_names = [s.name for s in samples]
study = samples[0].study
library_names = []
for k, assay_table in study_tables['assays'].items():
sample_idx = get_index_by_header(
assay_table, 'name', obj_cls=GenericMaterial, item_type='SAMPLE'
)
for row in assay_table['table_data']:
if row[sample_idx]['value'] in sample_names:
last_name = get_last_material_name(row, assay_table)
if last_name not in library_names:
library_names.append(last_name)
return GenericMaterial.objects.filter(
study=study, name__in=library_names
).order_by('name') | 4,452 |
def parse_next_frame(data):
"""
Parse the next packet from this MQTT data stream.
"""
if not data:
return None, b''
if len(data) < 2:
# Not enough data yet
return None, data
packet_type, flag1, flag2, flag3, flag4 = bitstruct.unpack('u4b1b1b1b1', data[0:1])
length = None
# Figure out the length of the packet
seek_point = 0
seek_multiplier = 1
packet_length = 0
encoded_byte = -1
while (encoded_byte & 128) != 0:
seek_point += 1
if len(data) < 1 + seek_point:
# Not enough data
return None, data
encoded_byte, = bitstruct.unpack('u8', data[seek_point:seek_point+1])
packet_length += (encoded_byte & 127) * seek_multiplier
seek_multiplier = seek_multiplier * 128
if seek_multiplier > 128 * 128 * 128:
raise ParseFailure()
# Do we have the whole packet?
if len(data) < 1 + seek_point + packet_length:
# Not the whole packet yet
return None, data
# Build the frame
frame = Frame(
packet_type=PacketType(packet_type),
flags=(flag1, flag2, flag3, flag4),
body=data[1 + seek_point:packet_length + 1 + seek_point])
# Return the data we didn't consume
data = data[1 + seek_point + packet_length:]
return frame, data | 4,453 |
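# Hedged usage sketch: b"\xc0\x00" is a complete MQTT PINGREQ packet
# (packet type 12, no flags set, remaining length 0). Assumes the surrounding
# module's Frame/PacketType definitions and its bitstruct import.
frame, rest = parse_next_frame(b"\xc0\x00")
print(frame.packet_type, rest)   # expected: the PINGREQ member (value 12) and b''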
def download_page(driver, url, path):
"""Download a page if it does not exist."""
driver.get(url)
spinner = 'modalTelaCarregando'
try:
WebDriverWait(driver, WAIT).until(
ec.invisibility_of_element((By.ID, spinner)))
except (TimeoutError, socket.timeout, HTTPError):
log(f'Error: waiting for {spinner} to stop')
return
# spinner = 'linkCites'
# try:
# WebDriverWait(driver, WAIT).until(
# ec.invisibility_of_element((By.ID, spinner)))
# except (TimeoutError, socket.timeout, HTTPError):
# log(f'Error: waiting for {spinner} to stop')
# return
with open(path, 'w') as out_file:
out_file.write(driver.page_source) | 4,454 |
def pick_glance_api_server():
"""Return which Glance API server to use for the request
This method provides a very primitive form of load-balancing suitable for
testing and sandbox environments. In production, it would be better to use
one IP and route that to a real load-balancer.
Returns (host, port)
"""
host_port = random.choice(FLAGS.glance_api_servers)
host, port_str = host_port.split(':')
port = int(port_str)
return host, port | 4,455 |
def remove_punctuation(transcriptions):
"""
:param transcriptions: dictionary containing the transcribed text
:return: cleaned string of words
This function removes punctuation from the story. """
parsed_string = dumps(transcriptions)
punctuations = '''[],!.'"\\?'''
for char in parsed_string:
if char in punctuations:
parsed_string = parsed_string.replace(char, '')
return parsed_string | 4,456 |
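# Hypothetical usage of remove_punctuation() above: the dict is JSON-dumped
# first, then the listed punctuation characters are stripped.
print(remove_punctuation({"line": "Hello, world!"}))   # {line: Hello world}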
def test_reverse_short():
"""Test reversing a short string."""
actual = reverse("Alex")
expected = "xelA"
assert actual == expected | 4,457 |
def snakify(str_: str) -> str:
"""Convert a string to snake case
Args:
str_: The string to convert
"""
return str_.replace(" ", "_").lower() | 4,458 |
def search_images(
project,
image_name_prefix=None,
annotation_status=None,
return_metadata=False
):
"""Search images by name_prefix (case-insensitive) and annotation status
:param project: project name or folder path (e.g., "project1/folder1")
:type project: str
:param image_name_prefix: image name prefix for search
:type image_name_prefix: str
:param annotation_status: if not None, annotation statuses of images to filter,
should be one of NotStarted InProgress QualityCheck Returned Completed Skipped
:type annotation_status: str
:param return_metadata: return metadata of images instead of names
:type return_metadata: bool
:return: metadata of found images or image names
:rtype: list of dicts or strs
"""
project, project_folder = get_project_and_folder_metadata(project)
team_id, project_id = project["team_id"], project["id"]
if annotation_status is not None:
annotation_status = common.annotation_status_str_to_int(
annotation_status
)
if project_folder is not None:
project_folder_id = project_folder["id"]
else:
project_folder_id = get_project_root_folder_id(project)
result_list = []
params = {
'team_id': team_id,
'project_id': project_id,
'annotation_status': annotation_status,
'offset': 0,
'folder_id': project_folder_id
}
if image_name_prefix is not None:
params['name'] = image_name_prefix
total_got = 0
total_images = 0
while True:
response = _api.send_request(
req_type='GET', path='/images-folders', params=params
)
if not response.ok:
raise SABaseException(
response.status_code, "Couldn't search images " + response.text
)
response = response.json()
images = response["images"]
folders = response["folders"]
results_images = images["data"]
for r in results_images:
if return_metadata:
result_list.append(r)
else:
result_list.append(r["name"])
total_images += len(results_images)
if images["count"] <= total_images:
break
total_got += len(results_images) + len(folders["data"])
params["offset"] = total_got
if return_metadata:
def process_result(x):
x["annotation_status"] = common.annotation_status_int_to_str(
x["annotation_status"]
)
return x
return list(map(process_result, result_list))
else:
return result_list | 4,459 |
def bibtexNoteszotero(bibtex_names):
"""
params:
bibtex_names, {}: parsed BibTeX fields; must contain a "notes" entry
return: notes_dict, {}: Zotero note item payload
"""
#
notes_dict = {}
notes_dict["itemType"] = "note"
notes_dict["relations"] = {}
notes_dict["tags"] = []
notes_dict["note"] = bibtex_names["notes"].strip()
#
return notes_dict | 4,460 |
def iliev_test_5(N=10000,
Ns=10,
L=15. | units.kpc,
dt=None):
"""
prepare iliev test and return SPH and simplex interfaces
"""
gas, sources = iliev_test_5_ic(N, Ns, L)
conv = nbody_system.nbody_to_si(1.0e9 | units.MSun, 1.0 | units.kpc)
sph = Fi(conv, use_gl=False, mode='periodic', redirection='none')
sph.initialize_code()
sph.parameters.use_hydro_flag = True
sph.parameters.radiation_flag = False
sph.parameters.self_gravity_flag = False
sph.parameters.gamma = 1
sph.parameters.isothermal_flag = True
sph.parameters.integrate_entropy_flag = False
sph.parameters.timestep = dt
sph.parameters.verbosity = 0
sph.parameters.pboxsize = 2*L
sph.commit_parameters()
sph.gas_particles.add_particles(gas)
sph.commit_particles()
# sph.start_viewer()
rad = SimpleX(number_of_workers=1, redirection='none')
rad.initialize_code()
rad.parameters.box_size = 2*L
rad.parameters.hilbert_order = 0
rad.commit_parameters()
gas.add_particles(sources)
rad.particles.add_particles(gas)
rad.commit_particles()
return sph, rad | 4,461 |
def test_dynamics_tracer():
"""Sanity check for dynamics tracer."""
tracer = wn.causal_graphs.trace_dynamics(simple_dynamics)
for time in range(10):
state = SimpleState()
config = SimpleConfig()
dependencies = tracer(state, time, config)
if time % 3 == 0:
assert set(dependencies["x1"]["states"]) == set(["x1", "x2", "x3"])
assert set(dependencies["x1"]["configs"]) == set([])
assert set(dependencies["x2"]["states"]) == set(["x1", "x2", "x3"])
assert set(dependencies["x2"]["configs"]) == set(["param"])
assert set(dependencies["x3"]["states"]) == set(["x1", "x2", "x3"])
assert set(dependencies["x3"]["configs"]) == set([])
elif time % 3 == 1:
assert set(dependencies["x1"]["states"]) == set(["x1"])
assert set(dependencies["x1"]["configs"]) == set([])
assert set(dependencies["x2"]["states"]) == set(["x2"])
assert set(dependencies["x2"]["configs"]) == set([])
assert set(dependencies["x3"]["states"]) == set(["x3"])
assert set(dependencies["x3"]["configs"]) == set([])
else:
assert set(dependencies["x1"]["states"]) == set([])
assert set(dependencies["x1"]["configs"]) == set(["param"])
assert set(dependencies["x2"]["states"]) == set(["x1", "x2", "x3"])
assert set(dependencies["x2"]["configs"]) == set([])
assert set(dependencies["x3"]["states"]) == set([])
assert set(dependencies["x3"]["configs"]) == set([]) | 4,462 |
def initialize_list(list_name, number_of_fields, value):
"""
Set given number of fields with given value to a list
Arguments:
- list_name: name of list to initialize
- number_of_fields: number of fields to add
- value: value to insert in fields
"""
# in case if not empty list
list_name.clear()
for i in range(number_of_fields):
list_name.append(value) | 4,463 |
def run(model, model_params, T,
method, method_params, num_iter,
tmp_path="/tmp/consistency_check.txt",
seed=None,
verbose=False,
simplified_interface=True):
"""
Wrapper around the full consistency check pipeline.
Parameters
----------
model : str
Name of the generative model.
Implemented models are: gn, generalized_gn.
model_params : dict
Parameters of the generative model.
T : int
Number of generative step.
method : str
Name of the inference method.
Implemented methods are: degree, OD, random_expand,
snowball_sampling, biased_snowball_sampling.
method_params : dict
Parameters of the inference method.
num_iter : int
Number of repetition of the inference method.
Note that all repetitions run on the same model instance.
tmp_path : str
Location where temporary files will be written.
verbose : bool
Output logs to stdout.
simplified_interface : bool
Assume that the generator is compiled with a simplified interface (i.e., not Boost).
Returns
-------
scores : list of dict
A list of scores (one per repetition).
Each entry of the list corresponds to a repetition of the method.
An entry in the list is a dictionary, whose key is the name of
the comparison measure.
Warning
-------
This function has side-effects. It writes and read from a temporary
location (defaulted to /tmp/) to communicate with pre-compiled modules.
If multiple instances run at the same time, make sure to pass different
temporary paths to each instances.
"""
# Tests
if {model} & available_models == set():
raise NotImplementedError("Model '" + str(model) +
"' not implemented.")
if {method} & available_methods == set():
raise NotImplementedError("Method '" + str(method) +
"' not implemented.")
# Generate history
generated_history = gn.run(model, model_params, T,
verbose=verbose, seed=seed,
simplified_interface=simplified_interface)
encoded_history, encoding, tag_encoding = obfuscate_history(generated_history, seed=seed)
_write_history(encoded_history, tmp_path)
# Infer and compute similarity
scores = []
for i in range(num_iter):
output = im.run(tmp_path, method, method_params, verbose=verbose)
if len(generated_history) != len(output):
raise RuntimeError("Length of generated and inferred data don't match.")
inferred = deobfuscate_history([x[0] for x in output], encoding, tag_encoding)
res = cp.corr(generated_history,
[(e, _[1]) for e, _ in zip(inferred, output)])
scores.append(res)
# Garbage collection
remove(tmp_path)
return scores | 4,464 |
def clean_street(address: str) -> str:
"""
Function to clean street strings.
"""
address = address.lower()
address = _standardize_street(address)
address = _abb_replace(address)
address = _ordinal_rep(address)
if address in SPECIAL_CASES.keys(): # Special cases
address = SPECIAL_CASES[address]
return address | 4,465 |
def unroll_policy_for_eval(
sess,
env,
inputs_feed,
prev_state_feed,
policy_outputs,
number_of_steps,
output_folder,
):
"""unrolls the policy for testing.
Args:
sess: tf.Session
env: The environment.
inputs_feed: dictionary of placeholder for the input modalities.
prev_state_feed: placeholder for the input to the prev_state of the model.
policy_outputs: tensor that contains outputs of the policy.
number_of_steps: maximum number of unrolling steps.
output_folder: output_folder where the function writes a dictionary of
detailed information about the path. The dictionary keys are 'states' and
'distance'. The value for 'states' is the list of states that the agent
goes along the path. The value for 'distance' contains the length of
shortest path to the goal at each step.
Returns:
states: list of states along the path.
distance: list of distances along the path.
"""
prev_state = [
np.zeros((1, FLAGS.lstm_cell_size), dtype=np.float32) for _ in range(2)
]
prev_action = np.zeros((1, 1, FLAGS.action_size + 1), dtype=np.float32)
obs = env.reset()
distances_to_goal = []
states = []
unique_id = '{}_{}'.format(env.cur_image_id(), env.goal_string)
for _ in range(number_of_steps):
distances_to_goal.append(
np.min([
len(
nx.shortest_path(env.graph, env.pose_to_vertex(env.state()),
env.pose_to_vertex(target_view)))
for target_view in env.targets()
]))
states.append(env.state())
feed_dict = {inputs_feed[mtype]: [[obs[mtype]]] for mtype in inputs_feed}
feed_dict[prev_state_feed[0]] = prev_state[0]
feed_dict[prev_state_feed[1]] = prev_state[1]
action_values, prev_state = sess.run(policy_outputs, feed_dict=feed_dict)
chosen_action = np.argmax(action_values[0])
obs, _, done, info = env.step(np.int32(chosen_action))
prev_action[0][0][chosen_action] = 1.
prev_action[0][0][-1] = float(info['success'])
# If the agent chooses action stop or the number of steps exceeeded
# env._episode_length.
if done:
break
# logging.info('distance = %d, id = %s, #steps = %d', distances_to_goal[-1],
output_path = os.path.join(output_folder, unique_id + '.npy')
with tf.gfile.Open(output_path, 'w') as f:
print('saving path information to {}'.format(output_path))
np.save(f, {'states': states, 'distance': distances_to_goal})
return states, distances_to_goal | 4,466 |
def init_worker():
""" Process pool initialization. """
# prevent SIGINT propagation to the subprocesses
signal(SIGINT, SIG_IGN) | 4,467 |
def get_result_filename(params, commit=''):
"""
获取时间
:return:
"""
save_result_dir = params['test_save_dir']
batch_size = params['batch_size']
epochs = params['epochs']
max_length_inp = params['max_dec_len']
embedding_dim = params['embed_size']
now_time = time.strftime('%Y_%m_%d_%H_%M_%S')
filename = now_time + '_batch_size_{}_epochs_{}_max_length_inp_{}_embedding_dim_{}{}.csv'.format(batch_size, epochs,
max_length_inp,
embedding_dim,
commit)
result_save_path = os.path.join(save_result_dir, filename)
return result_save_path | 4,468 |
def is_iterable(o: any) -> bool:
"""
Checks if `o` is iterable
Parameters
----------
o : any
The value to be checked.
Examples
--------
>>> is_iterable(list(range(5)))
True
>>> is_iterable(5)
False
>>> is_iterable('hello world')
True
>>> is_iterable(None)
False
"""
try:
_ = iter(o)
except TypeError:
return False
return True | 4,469 |
def fit(init_file, semipar=False):
""" """
check_presence_init(init_file)
dict_ = read(init_file)
# Perform some consistency checks given the user's request
check_presence_estimation_dataset(dict_)
check_initialization_dict(dict_)
# Semiparametric Model
if semipar is True:
quantiles, mte_u, X, b1_b0 = semipar_fit(init_file) # change to dict_
# Construct MTE
# Calculate the MTE component that depends on X
mte_x = np.dot(X, b1_b0)
# Put the MTE together
mte = mte_x.mean(axis=0) + mte_u
# Accounting for variation in X
mte_min = np.min(mte_x) + mte_u
mte_max = np.max(mte_x) + mte_u
rslt = {
"quantiles": quantiles,
"mte": mte,
"mte_x": mte_x,
"mte_u": mte_u,
"mte_min": mte_min,
"mte_max": mte_max,
"X": X,
"b1-b0": b1_b0,
}
# Parametric Normal Model
else:
check_par(dict_)
rslt = par_fit(dict_)
return rslt | 4,470 |
def date2gpswd(date):
"""Convert date to GPS week and day of week, return int tuple (week, day).
Example:
>>> from datetime import date
>>> date2gpswd(date(2017, 5, 17))
(1949, 3)
>>> date2gpswd(date(1917, 5, 17))
Traceback (most recent call last):
...
ValueError: Invalid date: 1917-05-17, too early.
"""
return __date2weeksday(date, GPS_START_DATE) | 4,471 |
def draw():
"""This function clears the screen and draws a single
pixel, whenever the buffer needs updating. Note that
colors are specified as palette indexes (0-15)."""
pyxel.cls(0) # clear screen (color)
render()
pyxel.mouse(True) | 4,472 |
def export_catalog(dataframe, **kwargs):
"""
exports data as csv
dataframe : pandas.DataFrame
kwargs : pandas.DataFrame.to_csv kwargs
"""
dataframe.to_csv(**kwargs) | 4,473 |
def installExceptionHandler():
"""
Install the exception handling function.
"""
sys.excepthook = lambda etype, value, tb: handleMyException((etype, value, tb)) | 4,474 |
def __make_sliders(no, f):
"""Create dynamic sliders for a specific field"""
style = {'width':'20%', 'display': 'none'}
return html.Div(id={'index': f'Slider_{no}', 'type':'slider'},
children=[__make_slider(no, i) for i in range(1,f+1)], style=style) | 4,475 |
def run(**options): # pragma: no cover
"""Runs a Job."""
CLIUtils.run(**options) | 4,476 |
def kurtosis(iterable, sample=False):
""" Returns the degree of peakedness of the given list of values:
> 0.0 => sharper peak around mean(list) = more infrequent, extreme values,
< 0.0 => wider peak around mean(list),
= 0.0 => normal distribution,
= -3 => flat
"""
a = iterable if isinstance(iterable, list) else list(iterable)
return moment(a, 4, sample) / (moment(a, 2, sample) ** 2.0 or 1) - 3 | 4,477 |
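# Editor's usage sketch for kurtosis() above (assumes the `moment` helper it
# calls is available). A normal sample has excess kurtosis near 0, while a
# uniform sample is platykurtic at roughly -1.2.
import random

random.seed(0)
normal_sample = [random.gauss(0, 1) for _ in range(10000)]
uniform_sample = [random.uniform(0, 1) for _ in range(10000)]
print(round(kurtosis(normal_sample), 2))   # ~0.0
print(round(kurtosis(uniform_sample), 2))  # ~-1.2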
def gen_data(shape, dtype, epsilon):
"""Generate data for testing the op."""
var = random_gaussian(shape, miu=1, sigma=0.3).astype(dtype)
m = random_gaussian(shape, miu=1, sigma=0.3).astype(dtype)
v = random_gaussian(shape, miu=1, sigma=0.3).astype(dtype)
grad = random_gaussian(shape, miu=1, sigma=0.3).astype(dtype)
lr = np.random.rand(1).astype(dtype)
beta1 = np.random.rand(1).astype(dtype)
beta2 = np.random.rand(1).astype(dtype)
beta1_power = beta1 * beta1
inputs = [var, m, v, grad, lr, beta1, beta1_power, beta2]
one = np.array([1]).astype(dtype)
epsilon = np.array([epsilon]).astype(dtype)
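    # The expected outputs below follow an AdaMax-style update (an editorial
    # reading of the formulas): m <- beta1*m + (1-beta1)*grad,
    # v <- max(beta2*v, |grad|), var <- var - lr*m / ((1-beta1_power)*(v+eps)).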
out_m = beta1 * m + (one - beta1) * grad
out_v = np.maximum(beta2 * v, np.abs(grad))
out_var = var - lr * out_m / ((one - beta1_power) * (out_v + epsilon))
expects = [out_var, out_m, out_v]
args = inputs
return inputs, expects, args | 4,478 |
def add_common_options(parser):
"""Add common options to the parser.
Args:
parser : parser to add the arguments to.
Return:
parser : After adding the arguments.
"""
# Common Arguments
parser.add_argument('--data_path', '-dp', dest='data_path', required=False, type=str, nargs=None, action = 'store', default="/GWAS/data/")
parser.add_argument('--data_identifier','-data', dest='data_identifier', required=False, type=str, nargs=None, action = 'store', default= "file_name") | 4,479 |
def factor_tmom_T1_RTN_60(df: pd.DataFrame):
"""
    Time-series momentum factor: the trailing 60-period sum of returns.
"""
factor = df['return'].rolling(60).sum()
return factor | 4,480 |
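# Editor's usage sketch for factor_tmom_T1_RTN_60() above with hypothetical
# data: the factor is NaN for the first 59 rows, then the trailing 60-period
# sum of returns.
import numpy as np
import pandas as pd

_df = pd.DataFrame({'return': np.random.normal(0, 0.01, 100)})
print(factor_tmom_T1_RTN_60(_df).tail())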
def get_dataset_info(dataset_name='mnist'):
"""Method to return dataset information for a specific dataset_name.
Args:
dataset_name: a string representing the dataset to be loaded using tfds
Returns:
A dictionary of relevant information for the loaded dataset.
"""
ds_info = tfds.builder(dataset_name).info
dataset_information = {
'num_classes': ds_info.features['label'].num_classes,
'data_shape': ds_info.features['image'].shape,
'train_num_examples': ds_info.splits['train'].num_examples,
}
return dataset_information | 4,481 |
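# Editor's usage sketch for get_dataset_info() above (assumes the original
# module did `import tensorflow_datasets as tfds`; tfds.builder() typically
# only needs dataset metadata, not a full download).
info = get_dataset_info('mnist')
print(info)  # expected: {'num_classes': 10, 'data_shape': (28, 28, 1), 'train_num_examples': 60000}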
def full_model(mode, hparams):
"""Make a clause search model including input pipeline.
Args:
mode: Either 'train' or 'eval'.
hparams: Hyperparameters. See default_hparams for details.
Returns:
logits, labels
Raises:
ValueError: If the model returns badly shaped tensors.
"""
if hparams.use_averages:
raise NotImplementedError('Figure out how to eval with Polyak averaging')
kind, model = all_models.make_model(name=hparams.model, mode=mode,
hparams=hparams, vocab=FLAGS.vocab)
batch_size = mode_batch_size(mode, hparams)
if kind == 'sequence':
# Read
_, conjectures, clauses, labels = inputs.sequence_example_batch(
mode=mode, batch_size=batch_size, shuffle=True)
clauses = tf.reshape(clauses, [2 * batch_size, -1])
labels = tf.reshape(labels, [2 * batch_size])
# Embed
vocab_size, _ = inputs.read_vocab(FLAGS.vocab)
conjectures, clauses = model_utils.shared_embedding_layer(
(conjectures, clauses), dim=hparams.embedding_size, size=vocab_size)
# Classify
conjectures = model.conjecture_embedding(conjectures)
conjectures = tf.reshape(
tf.tile(tf.reshape(conjectures, [batch_size, 1, -1]), [1, 2, 1]),
[2 * batch_size, -1])
clauses = model.axiom_embedding(clauses)
logits = model.classifier(conjectures, clauses)
elif kind == 'tree':
examples = inputs.proto_batch(mode=mode, batch_size=batch_size)
def weave(**ops):
return clause_loom.weave_clauses(
examples=examples, vocab=FLAGS.vocab, **ops)
logits, labels = model(weave)
elif kind == 'fast':
examples = inputs.proto_batch(mode=mode, batch_size=batch_size)
conjecture_sizes, conjecture_flat, clauses, labels = (
gen_clause_ops.random_clauses_as_fast_clause(
examples, vocab=FLAGS.vocab))
conjectures = jagged.Jagged(conjecture_sizes, conjecture_flat)
logits = model(conjectures, clauses)
# Done!
return fix_logits(kind, logits), labels | 4,482 |
def aggregatePredictions(df_pred, threshold=0.8):
"""
Aggregates probabilistic predictions, choosing the
state with the largest probability, if it exceeds
the threshold.
:param pd.DataFrame df_pred:
columns: state
rows: instance
values: float
:param float threshold:
:return pd.Series:
index: instance
values: state or np.nan if below threshold
"""
    MISSING = -1
    columns = df_pred.columns
    values = []
    for idx, row in df_pred.iterrows():
        row_list = row.tolist()
        max_value = max(row_list)
        if max_value >= threshold:
            # keep the state with the largest probability if it exceeds the threshold
            values.append(columns[row_list.index(max_value)])
        else:
            values.append(MISSING)
    ser = pd.Series(values, index=df_pred.index)
    ser = ser.apply(lambda v: np.nan if v == MISSING else v)
    return ser
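# Editor's usage sketch for aggregatePredictions() above with hypothetical
# data: row 'i1' has a clear winner above the threshold, row 'i2' does not
# and maps to NaN.
import numpy as np
import pandas as pd

_df_pred = pd.DataFrame({'S1': [0.9, 0.5], 'S2': [0.1, 0.5]}, index=['i1', 'i2'])
print(aggregatePredictions(_df_pred, threshold=0.8))
# i1     S1
# i2    NaN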
def test_create_topic_fail():
"""
Test create topic fail for some reason
"""
publisher = mock.Mock()
publisher.create_topic.side_effect = exceptions.GoogleAPIError
with pytest.raises(exceptions.GoogleAPIError):
create_topic(publisher, "project", "topic") | 4,484 |
def GetLocalInstanceConfig(local_instance_id):
"""Get the path of instance config.
Args:
local_instance_id: Integer of instance id.
Return:
        String, path of the Cuttlefish runtime config, or None if it does not exist.
"""
cfg_path = os.path.join(GetLocalInstanceRuntimeDir(local_instance_id),
constants.CUTTLEFISH_CONFIG_FILE)
if os.path.isfile(cfg_path):
return cfg_path
return None | 4,485 |
def is_input_element(obj: Any) -> bool:
"""
Returns True, if the given object is an :class:`.InputElement`, or a
subclass of InputElement.
"""
return isinstance(obj, InputElement) | 4,486 |
def setup():
"""
Install uWSGI system wide and upload vassals
"""
install()
configure() | 4,487 |
def line_intersects_grid((x0,y0), (x1,y1), grid, grid_cell_size=1):
""" Performs a line/grid intersection, finding the "super cover"
of a line and seeing if any of the grid cells are occupied.
The line runs between (x0,y0) and (x1,y1), and (0,0) is the
top-left corner of the top-left grid cell.
>>> line_intersects_grid((0,0),(2,2),[[0,0,0],[0,1,0],[0,0,0]])
True
>>> line_intersects_grid((0,0),(0.99,2),[[0,0,0],[0,1,0],[0,0,0]])
False
"""
grid_cell_size = float(grid_cell_size)
x0 = x0 / grid_cell_size
x1 = x1 / grid_cell_size
y0 = y0 / grid_cell_size
y1 = y1 / grid_cell_size
dx = abs(x1 - x0)
dy = abs(y1 - y0)
x = int(math.floor(x0))
y = int(math.floor(y0))
if dx != 0:
dt_dx = 1.0 / dx
else:
dt_dx = inf
if dy != 0:
dt_dy = 1.0 / dy
else:
dt_dy = inf
t = 0.0
n = 1
if (dx == 0):
x_inc = 0
t_next_horizontal = dt_dx
elif (x1 > x0):
x_inc = 1
n += int(math.floor(x1)) - x
t_next_horizontal = (math.floor(x0) + 1 - x0) * dt_dx
else:
x_inc = -1
n += x - int(math.floor(x1))
t_next_horizontal = (x0 - math.floor(x0)) * dt_dx
if (dy == 0):
y_inc = 0
t_next_vertical = dt_dy
elif (y1 > y0):
y_inc = 1
n += int(math.floor(y1)) - y
t_next_vertical = (math.floor(y0) + 1 - y0) * dt_dy
else:
y_inc = -1
n += y - int(math.floor(y1))
t_next_vertical = (y0 - math.floor(y0)) * dt_dy
while (n > 0):
if grid[y][x] == 1:
return True
if (t_next_vertical < t_next_horizontal):
y += y_inc
t = t_next_vertical
t_next_vertical += dt_dy
else:
x += x_inc
t = t_next_horizontal
t_next_horizontal += dt_dx
n -= 1
return False | 4,488 |
def SimuGumbel(n, m, theta):
"""
# Gumbel copula
Requires:
n = number of variables to generate
m = sample size
theta = Gumbel copula parameter
"""
v = [np.random.uniform(0,1,m) for i in range(0,n)]
X = levy_stable.rvs(alpha=1/theta, beta=1,scale=(np.cos(np.pi/(2*theta)))**theta,loc=0, size=m)
phi_t = lambda t: np.exp(-t**(1/theta))
u = [phi_t(-np.log(v[i])/X) for i in range(0,n)]
return u | 4,489 |
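# Editor's usage sketch for SimuGumbel() above (assumes numpy and
# scipy.stats.levy_stable are imported as the function requires). For a
# Gumbel copula, Kendall's tau equals 1 - 1/theta, so theta=2 should give
# a sample tau of roughly 0.5.
import numpy as np
from scipy.stats import kendalltau

np.random.seed(42)
u = SimuGumbel(n=2, m=5000, theta=2.0)
tau, _ = kendalltau(u[0], u[1])
print(round(tau, 2))  # ~0.5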
def Norm(x, y):
"""求一个二维向量模长"""
return math.pow(math.pow(x, 2) + math.pow(y, 2), 0.5) | 4,490 |
def make_roi(ms_experiment: ms_experiment_type, tolerance: float,
max_missing: int, min_length: int, min_intensity: float,
multiple_match: str, targeted_mz: Optional[np.ndarray] = None,
start: Optional[int] = None, end: Optional[int] = None,
mz_reduce: Union[str, Callable] = "mean",
sp_reduce: Union[str, Callable] = "sum",
mode: Optional[str] = None
) -> List[Roi]:
"""
Make Region of interest from MS data in centroid mode.
    Used by MSData as the first step of the centWave algorithm.
Parameters
----------
ms_experiment: pyopenms.MSExperiment
max_missing : int
        Maximum number of consecutive missing values. When a ROI surpasses this
        number it is considered finished and is added to the ROI list if it
        meets the length and intensity criteria.
min_length : int
The minimum length of a roi to be considered valid.
min_intensity : float
Minimum intensity in a roi to be considered valid.
tolerance : float
mz tolerance to connect values across scans
start : int, optional
First scan to analyze. If None starts at scan 0
end : int, optional
Last scan to analyze. If None, uses the last scan number.
multiple_match : {"closest", "reduce"}
        How to match peaks when there is more than one match. If `multiple_match`
        is `closest`, then the closest peak is assigned as a match and the
        others are assigned to no match. If `multiple_match` is `reduce`, then unique
mz and intensity values are generated using the reduce function in
`mz_reduce` and `sp_reduce` respectively.
mz_reduce : "mean" or Callable
function used to reduce mz values. Can be a function accepting
numpy arrays and returning numbers. Only used when `multiple_match`
is reduce. See the following prototype:
.. code-block:: python
def mz_reduce(mz_match: np.ndarray) -> float:
pass
sp_reduce : {"mean", "sum"} or Callable
function used to reduce intensity values. Can be a function accepting
numpy arrays and returning numbers. Only used when `multiple_match`
is reduce. To use custom functions see the prototype shown on
`mz_reduce`.
targeted_mz : numpy.ndarray, optional
if a list of mz is provided, roi are searched only using this list.
mode : str, optional
mode used to create Roi objects.
Returns
-------
roi: list[Roi]
Notes
-----
To create a ROI, m/z values in consecutive scans are connected if they are
    within the `tolerance`. If there's more than one possible m/z value to
connect in the next scan, two different strategies are available, using the
`multiple_match` parameter: If "closest" is used, then m/z values are
matched to the closest ones, and the others are used to create new ROI. If
"reduce" is used, then all values within the tolerance are combined. m/z and
intensity values are combined using the `mz_reduce` and `sp_reduce`
    parameters respectively. If no matching value has been found in a scan, a NaN
is added to the ROI. If no matching values are found in `max_missing`
consecutive scans the ROI is flagged as finished. In this stage, two
checks are made before the ROI is considered valid:
1. The number of non missing values must be higher than `min_length`.
2. The maximum intensity value in the ROI must be higher than
`min_intensity`.
    If the two conditions are met, the ROI is added to the list of valid ROI.
"""
if start is None:
start = 0
if end is None:
end = ms_experiment.getNrSpectra()
if targeted_mz is None:
mz_seed, _ = ms_experiment.getSpectrum(start).get_peaks()
targeted = False
else:
mz_seed = targeted_mz
targeted = True
size = end - start
rt = np.zeros(size)
processor = _RoiProcessor(mz_seed, max_missing=max_missing,
min_length=min_length,
min_intensity=min_intensity, tolerance=tolerance,
multiple_match=multiple_match,
mz_reduce=mz_reduce, sp_reduce=sp_reduce,
mode=mode)
for k_scan in range(start, end):
sp = ms_experiment.getSpectrum(k_scan)
rt[k_scan - start] = sp.getRT()
mz, spint = sp.get_peaks()
processor.add(mz, spint, targeted=targeted)
processor.append_to_roi(rt, targeted=targeted)
# add roi not completed during last scan
processor.flag_as_completed()
processor.append_to_roi(rt)
return processor.roi | 4,491 |
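# Editor's usage sketch for make_roi() above (assumes pyopenms is installed;
# 'sample.mzML' and the numeric settings are hypothetical, illustrative values
# for centroid-mode data).
import pyopenms

exp = pyopenms.MSExperiment()
pyopenms.MzMLFile().load('sample.mzML', exp)
roi_list = make_roi(exp, tolerance=0.005, max_missing=1, min_length=10,
                    min_intensity=500.0, multiple_match='closest')
print(len(roi_list))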
def get_all_zcs_containers(session, start=None, limit=None, return_type=None,
**kwargs):
"""
Retrieves details for all Zadara Container Services (ZCS) containers
configured on the VPSA.
:type session: zadarapy.session.Session
:param session: A valid zadarapy.session.Session object. Required.
:type start: int
:param start: The offset to start displaying ZCS containers from.
Optional.
    :type limit: int
:param limit: The maximum number of ZCS containers to return. Optional.
:type return_type: str
:param return_type: If this is set to the string 'json', this function
will return a JSON string. Otherwise, it will return a Python
dictionary. Optional (will return a Python dictionary by default).
:rtype: dict, str
:returns: A dictionary or JSON data set as a string depending on
return_type parameter.
"""
parameters = verify_start_limit(start, limit)
path = '/api/containers.json'
return session.get_api(path=path, parameters=parameters,
return_type=return_type, **kwargs) | 4,492 |
def trainAndPredictModel(model_type="basic", features_cutoff=3, regularizer=1, pretrained=False, viterbi_cutoff=20):
"""main interface method for easily training a model, running inference for predictions,
evaluate it and generate competition file for it."""
data = readData(train_file)
features = constructFeatures(model_type, data, features_cutoff)
model = createModel(data, features, regularizer, pretrained=pretrained)
trainModel(model, pretrained=pretrained)
results = evaluateModel(data, model, viterbi_cutoff)
results.append("Features Cutoff: " + str(features_cutoff))
results.append("Regularizer: " + str(regularizer))
results.append("Viterbi Cutoff: " + str(viterbi_cutoff)) | 4,493 |
def jordan_wigner(n):
"""
Generates the Jordan-Wigner representation of the fermionic creation, annihilation,
and Majorana operators for an n-mode system.
The convention for the Majorana operators is as follows:
c_j=aj^{dag}+aj
c_{n+j}=i(aj^{dag}-aj)
"""
s = ket(2, 0) @ dag(ket(2, 1))
S = su_generators(2)
a = {} # Dictionary for the annihilation operators
c = {} # Dictionary for the Majorana operators
for j in range(1, n + 1):
a[j] = tensor([S[3], j - 1], s, [S[0], n - j])
c[j] = dag(a[j]) + a[j]
c[n + j] = 1j * (dag(a[j]) - a[j])
return a, c | 4,494 |
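# Editor's sketch checking the defining Majorana relation {c_i, c_j} = 2*delta_ij*I
# for the operators built above (assumes the helpers ket/dag/su_generators/tensor
# used by jordan_wigner are importable from the same codebase and return numpy arrays).
import numpy as np

_, c = jordan_wigner(2)
dim = c[1].shape[0]  # 2**n = 4 for n = 2 modes
assert np.allclose(c[1] @ c[2] + c[2] @ c[1], np.zeros((dim, dim)))  # anticommute, i != j
assert np.allclose(c[1] @ c[1], np.eye(dim))                         # square to identity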
def make_update(label: str, update_time: str, repeat: str, data: str, news: str) -> list[dict]:
"""Schedules an update with name 'label' to happen in 'interval' seconds. Updates saved covid
data, news and repeats the update depending on the content of the respective parameters. Adds
to global 'scheduled_updates' list and returns scheduler queue.
"""
# Check that at least one option has been chosen
if not data and not news:
logging.warning("Attempted to schedule update without selecting any options.")
return scheduler.queue
# Check update will be in at least 5 seconds from current time
interval = hhmm_to_seconds(update_time) - hhmm_to_seconds( current_time_hhmm() )
if interval < 5:
logging.warning("Attempted to schedule update too soon.")
return scheduler.queue
# Dictionary to store all information about the update
update = {
'title': label,
'content': f"At {update_time} this update will: "
}
if data:
# Schedule data update
update['data'] = schedule_covid_updates(interval, label)
update['content'] += "update covid data, "
logging.info("Covid data update has been scheduled for %s", update_time)
if news:
# Schedule news update
update['news'] = scheduler.enter(interval, 1, update_news, (label,))
update['content'] += "update covid news, "
logging.info("News update has been scheduled for %s", update_time)
if repeat:
# Schedule update to repeat in 24 hrs
update['repeat'] = scheduler.enter(
60*60*24, 1, make_update, (label, update_time, repeat, data, news)
)
update['content'] += "repeat in 24 hours, "
logging.info("Update %s has been scheduled to repeat itself in 24 hours", label)
# Clean up update content to be displayed
    update['content'] = update['content'][:-2]
scheduled_updates.append(update)
return scheduler.queue | 4,495 |
def performTest(name, test): #{{{
"""
Given a series of writes in `test', generate a format string
and pass it to the vulnerable program. If the writes were successful
without destroying any other memory locations, return True.
Terminates after 2 seconds to handle infinite loops in libformatstr.
"""
f = FormatStr(maxbuf)
for (k,v) in test.items():
f[k] = v
(out, err, fill) = (None, None, None)
def sighandler(signum, frame):
raise Exception("Command timed out")
signal.signal(signal.SIGALRM, sighandler)
signal.alarm(2)
try:
payload = f.payload(offset, padding=shift)
if len(payload) > maxbuf:
print "[-] payload is longer than allowed (%d vs %s)" % (len(payload), maxbuf)
(out, err, fill) = checkOutput(payload)
except Exception,e:
print "[-] Exception occurred: %s" % e
signal.alarm(0)
if err == None or not checkMemoryDump(err, fill, f.mem):
print "[-] FAILED: Test \"%s\" failed" % name
return False
else:
print "[+] SUCCESS: Test \"%s\" succeeded" % name
return True | 4,496 |
def readsignal_VEC(name , fa):
"""
    Reads the time signal stored in ../data/<name>.txt, written in a
    single-column format, and returns (ndats, signal), where signal is a
    1-D array scaled by the instrumental amplification factor fa.
"""
path = '../data/'
channel = np.loadtxt(path + name + '.txt')
ndats = len(channel)
signal = np.zeros([ndats], dtype=float)
for i in range(ndats):
signal[i]=channel[i]*fa
#
return ndats , signal | 4,497 |
def extract_slide_texture_features(index, output_segment, slide_path, halo_roi_path, method_data):
"""Extract slide texture features
Args:
index (string): main index string
output_segment (string): path to write result parquet
slide_path (string): path to the whole slide image
halo_roi_path (string): path to halo roi path
method_data (dict): method parameters with annotation and tile details
including annotationLabel, stainChannel and tileSize
Returns:
tuple: path to features saved as a np.array & path to feature metadata saved as a parquet.
"""
print ("Hello from extract_slide_texture_features()")
annotation_name, stain_channel, TILE_SIZE = method_data['annotationLabel'], method_data['stainChannel'], method_data['tileSize']
dest_dir=f"/gpfs/mskmind_ess/aukermaa/data/{index}/original_glcm_ClusterTendency/"
os.makedirs(dest_dir, exist_ok=True)
img_arr, sample_arr, mask_arr = get_slide_roi_masks(
slide_path=slide_path,
halo_roi_path=halo_roi_path,
annotation_name=annotation_name)
vectors = get_stain_vectors_macenko(sample_arr)
print ("Stain vectors=", vectors)
print ("Max x levels:", img_arr.shape[0])
if (os.path.exists(f"{dest_dir}/vector.npy")):
print ("Output already generated, not doing anything...")
return dest_dir, output_segment
features = np.array([])
nrow = 0
for x in range(0, img_arr.shape[0], TILE_SIZE):
nrow += 1
for y in range(0, img_arr.shape[1], TILE_SIZE):
img_patch = img_arr [x:x+TILE_SIZE, y:y+TILE_SIZE, :]
mask_patch = mask_arr[x:x+TILE_SIZE, y:y+TILE_SIZE]
if mask_patch.sum() == 0: continue
address = f"{index}_{x}_{y}"
try:
texture_values = extract_patch_texture_features(address,
img_patch,
mask_patch,
stain_vectors=vectors,
stain_channel=stain_channel,
glcm_feature='original_glcm_ClusterTendency')
                if texture_values is not None:
features = np.append(features, texture_values)
except Exception as exc:
print (f"Skipped tile {address} because: {exc}")
print (f"On row {nrow} of {len(range(0, img_arr.shape[0], TILE_SIZE))}")
n, (smin, smax), sm, sv, ss, sk = stats.describe(features)
hist_features = {
'main_index': index,
'pixel_original_glcm_ClusterTendency_nobs': n,
'pixel_original_glcm_ClusterTendency_min': smin,
'pixel_original_glcm_ClusterTendency_max': smax,
'pixel_original_glcm_ClusterTendency_mean': sm,
'pixel_original_glcm_ClusterTendency_variance': sv,
'pixel_original_glcm_ClusterTendency_skewness': ss,
'pixel_original_glcm_ClusterTendency_kurtosis': sk
}
data_table = pd.DataFrame(data=hist_features, index=[0]).set_index('main_index')
print (data_table)
pq.write_table(pa.Table.from_pandas(data_table), output_segment)
print ("Saved to", output_segment)
np.save(f"{dest_dir}/vector.npy", features)
return dest_dir, output_segment | 4,498 |
def connect(host="localhost", port=27450):
"""Connect to server."""
client = socket(AF_INET, SOCK_DGRAM)
client.connect((host, port))
return client | 4,499 |
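# Editor's usage sketch for connect() above: send a datagram and wait for a
# reply (assumes an echo-style UDP server is listening on localhost:27450;
# the wire protocol is a guess).
client = connect()
client.send(b'ping')
print(client.recv(1024))
client.close()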