| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def test_gets():
"""test get_first and get_second"""
two_elem_int_list = [1, 2]
two_elem_int_tuple = (1, 2)
with pytest.raises(ValueError):
get_first(1)
with pytest.raises(ValueError):
get_first([])
assert get_first(two_elem_int_list) == 1
assert get_first(two_elem_int_tuple) == 1
with pytest.raises(ValueError):
get_second([1])
assert get_second(two_elem_int_list) == 2
assert get_second(two_elem_int_tuple) == 2 | 3,500 |
def block(**arguments: Any) -> Optional[Blocks]:
"""Python application interface for creating an initial block file from command line or python code.
This method creates an HDF5 file associated with the desired initial flow specification (for each
needed computational field), suitable for input by the FLASH application at runtime.
Keyword Arguments:
ndim (int): Number of simulation dimensions (i.e., 2 or 3).
nxb (int): Number of grid points per block in the i direction.
nyb (int): Number of grid points per block in the j direction.
nzb (int): Number of grid points per block in the k direction.
iprocs (int): Number of blocks in the i direction.
jprocs (int): Number of blocks in the j direction.
kprocs (int): Number of blocks in the k direction.
fields (dict): Key/value pairs for fields (e.g., {'temp': 'center', ...})
fmethod (dict): Key/value pairs for flow initialization (e.g., {'temp': 'constant', ...}).
fparam (dict): Key/value pairs for parameters (e.g., {'temp': {'const': 0.5, ...}, ...}) used for each field method.
path (str): Path to source files used in some initialization methods (e.g., python).
dest (str): Path to initial block hdf5 file.
ignore (bool): Ignore configuration file provided arguments, options, and flags.
result (bool): Return the calculated fields by block on root.
nofile (bool): Do not write the calculated fields by block to file.
Note:
By default this function reads the grid data from the hdf5 file (i.e., must run create.grid() first); optionally
you can provide the result from grid creation directly by using an optional keyword -- coords: (ndarray, ...).
"""
args = process_arguments(**arguments)
path = args.pop('dest')
ndim = args.pop('ndim')
procs = args.pop('procs')
sizes = args.pop('sizes')
result = args.pop('result')
nofile = args.pop('nofile')
cmdline = args.pop('cmdline', False)
coords = args.pop('coords', None)
with args.pop('context')() as progress:
if coords is None: coords = read_coords(path=path, ndim=ndim)
shapes = get_shapes(ndim=ndim, procs=procs, sizes=sizes)
grids = get_grids(coords=coords, ndim=ndim, procs=procs, sizes=sizes)
blocks, index = calc_blocks(grids=grids, procs=procs, shapes=shapes, **args)
if not nofile: write_blocks(blocks=blocks, index=index, path=path, shapes=shapes)
if not result: return None
if cmdline: screen_out(blocks=blocks)
return blocks | 3,501 |
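# Hedged usage sketch for block() above, assuming the function is in scope and
# that create.grid() has already written grid data to `dest`; the field names
# and parameter values here are illustrative assumptions, not taken from the source.
blocks = block(ndim=2, nxb=16, nyb=16, iprocs=2, jprocs=2,
               fields={'temp': 'center'}, fmethod={'temp': 'constant'},
               fparam={'temp': {'const': 0.5}}, dest='./initial',
               result=True, nofile=False)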
def test__string():
""" test vmatrix.string
"""
vma = vmatrix.from_string(vmatrix.string(CH4O_VMA))
assert vma == CH4O_VMA | 3,502 |
def enable_multivsys(fw_conn):
"""Enable Multi-vsys
Args:
fw_conn (PanDevice): A panos object for device
"""
command = '<set><system><setting><multi-vsys>on</multi-vsys></setting></system></set>'
fw_conn.op(cmd=command, cmd_xml=False) | 3,503 |
def _low_discrepancy(dim, n, seed=0.5):
"""Generate a 1d, 2d, or 3d low discrepancy sequence of coordinates.
Parameters
----------
dim : one of {1, 2, 3}
The dimensionality of the sequence.
n : int
How many points to generate.
seed : float or array of float, shape (dim,)
The seed from which to start the quasirandom sequence.
Returns
-------
pts : array of float, shape (n, dim)
The sampled points.
References
----------
..[1]: http://extremelearning.com.au/unreasonable-effectiveness-of-quasirandom-sequences/
"""
phi1 = 1.6180339887498948482
phi2 = 1.32471795724474602596
phi3 = 1.22074408460575947536
seed = np.broadcast_to(seed, (1, dim))
phi = np.array([phi1, phi2, phi3])
g = 1 / phi
n = np.reshape(np.arange(n), (n, 1))
pts = (seed + (n * g[:dim])) % 1
return pts | 3,504 |
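# Minimal check of _low_discrepancy() above, assuming numpy is imported as np in
# the defining module: five 2-d quasirandom points in [0, 1) from the default seed.
pts = _low_discrepancy(2, 5)
assert pts.shape == (5, 2)
assert ((0 <= pts) & (pts < 1)).all()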
def get(
url: str
) -> Dict[str, object]:
"""
Returns the SDK GET response.
:param url: A string url endpoint.
:type url: str
:return: Dict[str, object]
"""
try:
res = requests.get(url, headers=get_headers())
except Exception as e:
# handle_request_error is expected to raise; return its result so that
# `res` is never referenced before assignment if it does not.
return handle_request_error(e)
return handle_response(res)
def register(request):
"""Create an account for a new user"""
if request.method == 'POST':
data = request.POST.copy()
form = tcdUserCreationForm(data)
next = request.POST['next']
if form.is_valid():
new_user = User.objects.create_user(username=data['username'],
password=data['password1'],
email=data['email'])
new_user.is_staff=False
new_user.is_superuser=False
new_user.is_active=True
new_user.save()
new_user = auth.authenticate(username=new_user.username,
password=data['password1'])
auth.login(request, new_user)
new_user_profile = Profile(user=new_user,
score=0
)
new_user_profile.save()
return HttpResponseRedirect(next)
else:
form = tcdUserCreationForm()
if 'next' in request.GET:
next = request.GET['next']
else:
next = "/"
return render_to_response("registration/register.html",
{'form' : form,
'redirect' : next},
context_instance=RequestContext(request)
) | 3,506 |
def set_index_da_ct(da):
"""Stacks all coordinates into one multindex and automatically generates a long_name"""
coordnames = list(da.coords)
da_stacked = da.set_index(ct=coordnames)
if len(coordnames) == 1:
# only one coordinate: just rename ct to the coordinate name
da_unstacked = da_stacked.rename(ct=coordnames[0])
else:
# generate multi-index
long_name_string = 'Test Case ('
for coord in da.coords:
if 'long_name' in da.coords[coord].attrs:
long_name_string = long_name_string + da.coords[coord].attrs['long_name'] + ', '
else:
long_name_string = long_name_string + coord + ', '
#remove last comma and close parentheses
long_name_string = long_name_string[0:-2] + ')'
da_stacked.coords['ct'].attrs = dict(long_name=long_name_string)
da_unstacked = da_stacked.unstack()
for coord in da.coords:
da_unstacked.coords[coord].attrs = da.coords[coord].attrs
return da_unstacked, da_stacked | 3,507 |
def test_list_ncname_max_length_3_nistxml_sv_iv_list_ncname_max_length_4_1(mode, save_output, output_format):
"""
Type list/NCName is restricted by facet maxLength with value 8.
"""
assert_bindings(
schema="nistData/list/NCName/Schema+Instance/NISTSchema-SV-IV-list-NCName-maxLength-4.xsd",
instance="nistData/list/NCName/Schema+Instance/NISTXML-SV-IV-list-NCName-maxLength-4-1.xml",
class_name="NistschemaSvIvListNcnameMaxLength4",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
) | 3,508 |
def showSelectionInTitle(*args, **kwargs):
"""
This command causes the title of the window specified as an argument to be linked to the current file and selection.
Returns: None
"""
pass | 3,509 |
def LikeView(request, pk):
"""Function view that manages the likes and dislikes of a post"""
post = get_object_or_404(Post, id=request.POST.get('post_id'))
liked = False
if post.likes.filter(id=request.user.id).exists():
post.likes.remove(request.user)
liked = False
else:
post.likes.add(request.user)
liked = True
return HttpResponseRedirect(reverse('post-detail', args=[str(pk)])) | 3,510 |
def set_edge_color_mapping(table_column, table_column_values=None, colors=None, mapping_type='c', default_color=None,
style_name=None, network=None, base_url=DEFAULT_BASE_URL):
"""Map table column values to colors to set the edge color.
Args:
table_column (str): Name of Cytoscape table column to map values from
table_column_values (list): List of values from Cytoscape table to be used in mapping
colors (list): list of hex colors to map to ``table_column_values``
mapping_type (str): continuous, discrete or passthrough (c,d,p); default is continuous
default_color (str): Hex color to set as default
style_name (str): name for style
network (SUID or str or None): Name or SUID of a network or view. Default is the
"current" network active in Cytoscape.
base_url (str): Ignore unless you need to specify a custom domain,
port or version to connect to the CyREST API. Default is http://127.0.0.1:1234
and the latest version of the CyREST API supported by this version of py4cytoscape.
Returns:
str: ''
Raises:
CyError: if invalid color, table column doesn't exist, table column values doesn't match values list, or invalid style name, network or mapping type
requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error
Examples:
>>> set_edge_color_mapping('EdgeBetweenness', [1.0, 16.36], ['#FBE723', '#440256'], style_name='galFiltered Style')
''
>>> set_edge_color_mapping('EdgeBetweenness', ['1', '2'], ['#FFFF00', '#00FF00'], 'd', style_name='galFiltered Style')
''
>>> set_edge_color_mapping(**gen_node_color_map('Degree', mapping_type='d'))
''
>>> set_edge_color_mapping(**gen_edge_color_map('interaction', palette_color_brewer_q_Accent(), mapping_type='d'))
''
>>> set_edge_color_mapping(**gen_edge_color_map('EdgeBetweenness'))
''
>>> set_edge_color_mapping(**gen_edge_color_map('EdgeBetweenness', palette_color_brewer_s_Blues()))
''
>>> set_edge_color_mapping(**gen_edge_color_map('EdgeBetweenness', (palette_color_brewer_s_Blues(), palette_color_brewer_d_BrBG()))
''
>>> set_edge_color_mapping('EdgeBetweennessColor', mapping_type='p', default_color='#654321', style_name='galFiltered Style')
''
See Also:
:meth:`gen_edge_color_map`
See Also:
`Value Generators <https://py4cytoscape.readthedocs.io/en/0.0.9/concepts.html#value-generators>`_ in the Concepts section in the py4cytoscape User Manual.
"""
verify_hex_colors(colors)
# set default
if default_color is not None:
style_defaults.set_edge_color_default(default_color, style_name, base_url=base_url)
# TODO: An error here will be missed ... shouldn't this throw an exception?
# perform mapping for COLOR (i.e., when arrowColorMatchesEdge=T)
# TODO: This code checks table_column, but the R code does not
res = _update_visual_property('EDGE_UNSELECTED_PAINT', table_column, table_column_values=table_column_values, range_map=colors,
mapping_type=mapping_type, style_name=style_name, network=network,
base_url=base_url, table='edge')
if res is not None:
# perform mapping for STROKE (i.e., when arrowColorMatchesEdge=F)
res = _update_visual_property('EDGE_STROKE_UNSELECTED_PAINT', table_column,
table_column_values=table_column_values, range_map=colors,
mapping_type=mapping_type, style_name=style_name, network=network,
base_url=base_url, table='edge')
return res | 3,511 |
def retrieve_tape_recovery_point(TapeARN=None, GatewayARN=None):
"""
Retrieves the recovery point for the specified virtual tape. This operation is only supported in the tape gateway architecture.
A recovery point is a point in time view of a virtual tape at which all the data on the tape is consistent. If your gateway crashes, virtual tapes that have recovery points can be recovered to a new gateway.
See also: AWS API Documentation
Examples
Retrieves the recovery point for the specified virtual tape.
Expected Output:
:example: response = client.retrieve_tape_recovery_point(
TapeARN='string',
GatewayARN='string'
)
:type TapeARN: string
:param TapeARN: [REQUIRED]
The Amazon Resource Name (ARN) of the virtual tape for which you want to retrieve the recovery point.
:type GatewayARN: string
:param GatewayARN: [REQUIRED]
The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to return a list of gateways for your account and region.
:rtype: dict
:return: {
'TapeARN': 'string'
}
"""
pass | 3,512 |
def weld_describe(array, weld_type, aggregations):
""" Aggregate during the same evaluation as opposed to separately as in Series.agg
Parameters
----------
array : np.ndarray or WeldObject
to aggregate on
weld_type : WeldType
of the array
aggregations : list of str
supported are = {'min', 'max', 'sum', 'prod', 'mean', 'std'}
Returns
-------
WeldObject
"""
assert isinstance(aggregations, list)
assert len(aggregations) > 0
weld_obj = WeldObject(_encoder, _decoder)
array_var = weld_obj.update(array)
if isinstance(array, WeldObject):
array_var = array.obj_id
weld_obj.dependencies[array_var] = array
merger_chunk = """
let agg_%(name)s = f64(
result(
for(
%(array)s,
merger[%(type)s, %(operation)s],
|b, i, e|
merge(b, e)
)
)
);"""
mean_chunk_solo = """
let agg_mean = f64(
result(
for(
%(array)s,
merger[%(type)s, +],
|b, i, n|
merge(b, n)
)
)
) / f64(len(%(array)s));"""
mean_chunk_with_sum = """
let agg_mean = agg_sum / f64(len(%(array)s));
"""
std_chunk_solo = """
%(mean)s
let agg_std = sqrt(
result(
for(
%(array)s,
merger[f64, +],
|b, i, n|
merge(b, pow(f64(n) - agg_mean, 2.0))
)
) / f64(len(%(array)s) - 1L)
);""".replace('%(mean)s', mean_chunk_with_sum if 'sum' in aggregations else mean_chunk_solo)
std_chunk_with_mean = """
let agg_std = sqrt(
result(
for(
%(array)s,
merger[f64, +],
|b, i, n|
merge(b, pow(f64(n) - agg_mean, 2.0))
)
) / f64(len(%(array)s) - 1L)
);"""
aggregations_dict = {'min': merger_chunk.replace('%(operation)s', 'min').replace('%(name)s', 'min'),
'max': merger_chunk.replace('%(operation)s', 'max').replace('%(name)s', 'max'),
'sum': merger_chunk.replace('%(operation)s', '+').replace('%(name)s', 'sum'),
'prod': merger_chunk.replace('%(operation)s', '*').replace('%(name)s', 'prod'),
'mean': mean_chunk_with_sum if 'sum' in aggregations else mean_chunk_solo,
'std': std_chunk_with_mean if 'mean' in aggregations else std_chunk_solo}
weld_template = """
%(chunks)s
let agg_result = appender[f64];
%(merges)s
result(agg_result)
"""
chunks = ''.join([aggregations_dict[agg] for agg in aggregations])
merges = ''.join(['let agg_result = merge(agg_result, %s);\n\t' % ('agg_' + agg) for agg in aggregations])
weld_obj.weld_code = weld_template % {'chunks': chunks, 'merges': merges} \
% {'array': array_var, 'type': weld_type}
return weld_obj | 3,513 |
def roles_required(*roles):
"""Decorator which specifies that a user must have all the specified roles.
Example::
@app.route('/dashboard')
@roles_required('admin', 'editor')
def dashboard():
return 'Dashboard'
The current user must have both the `admin` role and `editor` role in order
to view the page.
:param args: The required roles.
"""
def wrapper(fn):
@wraps(fn)
def decorated_view(*args, **kwargs):
perms = [Permission(RoleNeed(role)) for role in roles]
for perm in perms:
if not perm.can():
if _security._unauthorized_callback:
return _security._unauthorized_callback()
else:
return _get_unauthorized_view()
return fn(*args, **kwargs)
return decorated_view
return wrapper | 3,514 |
def get_all_forms(url):
"""Given a `url`, it returns all forms from the HTML content"""
soup = bs(requests.get(url).content, "html.parser")
return soup.find_all("form") | 3,515 |
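# Example use of get_all_forms() above; the URL is a placeholder assumption, and
# `requests` plus `from bs4 import BeautifulSoup as bs` are assumed imported as in the source.
forms = get_all_forms("https://httpbin.org/forms/post")
for form in forms:
    print(form.get("action"), form.get("method"))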
def cal_max_len(ids, curdepth, maxdepth):
"""calculate max sequence length"""
assert curdepth <= maxdepth
if isinstance(ids[0], list):
res = max([cal_max_len(k, curdepth + 1, maxdepth) for k in ids])
else:
res = len(ids)
return res | 3,516 |
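# Quick illustration of cal_max_len() above: it returns the longest inner sequence
# of a nested id list, with curdepth/maxdepth describing the nesting level.
assert cal_max_len([[1, 2, 3], [4, 5]], curdepth=1, maxdepth=2) == 3
assert cal_max_len([7, 8], curdepth=1, maxdepth=1) == 2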
def urlencode(query, doseq=True, quote_via=quote_plus):
"""
An alternate implementation of Python's stdlib
:func:`urllib.parse.urlencode` function which accepts unicode keys and
values within the ``query`` dict/sequence; all Unicode keys and values are
first converted to UTF-8 before being used to compose the query string.
The value of ``query`` must be a sequence of two-tuples
representing key/value pairs *or* an object (often a dictionary)
with an ``.items()`` method that returns a sequence of two-tuples
representing key/value pairs.
For minimal calling convention backwards compatibility, this
version of urlencode accepts *but ignores* a second argument
conventionally named ``doseq``. The Python stdlib version behaves
differently when ``doseq`` is False and when a sequence is
presented as one of the values. This version always behaves in
the ``doseq=True`` mode, no matter what the value of the second
argument.
Both the key and value are encoded using the ``quote_via`` function which
by default is using a similar algorithm to :func:`urllib.parse.quote_plus`
which converts spaces into '+' characters and '/' into '%2F'.
.. versionchanged:: 1.5
In a key/value pair, if the value is ``None`` then it will be
dropped from the resulting output.
.. versionchanged:: 1.9
Added the ``quote_via`` argument to allow alternate quoting algorithms
to be used.
"""
try:
# presumed to be a dictionary
query = query.items()
except AttributeError:
pass
result = ''
prefix = ''
for (k, v) in query:
k = quote_via(k)
if is_nonstr_iter(v):
for x in v:
x = quote_via(x)
result += '%s%s=%s' % (prefix, k, x)
prefix = '&'
elif v is None:
result += '%s%s=' % (prefix, k)
else:
v = quote_via(v)
result += '%s%s=%s' % (prefix, k, v)
prefix = '&'
return result | 3,517 |
def clip_action(action, action_min, action_max):
""" Truncates the entries in action to the range defined between
action_min and action_max. """
return np.clip(action, action_min, action_max) | 3,518 |
def run_delphi(pdb_file, output_directory, output_filename,
delphi_path, radius_file=None, charge_file=None, grid_size=101, surface=None, center=False):
""" Run Delphi on protein surface created by MSMS program """
# TODO: Rewrite using template string
if not os.path.isdir(output_directory):
os.mkdir(output_directory)
this_script_path = os.path.dirname(os.path.realpath(__file__))
if radius_file is None:
radius_file = this_script_path + '/charmm.siz'
if charge_file is None:
charge_file = this_script_path + '/charmm.crg'
parameters = [
f'in(pdb,file="{pdb_file}")',
f'in(siz,file="{radius_file}")',
f'in(crg,file="{charge_file}")',
f'gsize={grid_size}',
f'salt=0.15',
f'exdi=80',
f'linit=2000',
f'maxc=0.0000000001',
f'out(phi, file="{output_directory}/{output_filename}.cub", format="cube")',
]
if center:
parameters += ['acenter(0,0,0)']
if surface:
parameters += [
f'in(frc,file="{surface}")',
f'out(frc, file="{output_directory}/{output_filename}.pot")',
f'site(Atom, Potential, Reaction, Coulomb, Field)',
]
# write the parameter file with a context manager so the handle is closed before delphi runs
with open(f'{output_filename}_tmp.prm', 'w') as prm_file:
prm_file.write('\n'.join(parameters) + '\n')
subprocess.run([delphi_path, f'{output_filename}_tmp.prm'])
os.remove(f'{output_filename}_tmp.prm')
def isGray(image):
"""Return True if the image has one channel per pixel."""
return image.ndim < 3 | 3,520 |
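# isGray() above simply checks the number of array dimensions: a grayscale image
# has shape (H, W) while a color image has shape (H, W, channels).
import numpy as np
assert isGray(np.zeros((480, 640), dtype=np.uint8)) is True
assert isGray(np.zeros((480, 640, 3), dtype=np.uint8)) is False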
def prophet_plot(
df,
fig,
today_index,
lookback_days=None,
predict_days=21,
outliers=list()):
"""
Plot the actual, predictions, and anomalous values
Args
----
df : pandas DataFrame
The daily time-series data set contains ds column for
dates (datetime types such as datetime64[ns]) and y column for numerical values
fig : matplotlib Figure
A plot with actual data, predicted values and the interval which we previously obtained
from Prophet's model.plot(forecast).
today_index : int
The index of the date list in the dataframe dividing the baseline and prediction time frames.
lookback_days : int, optional (default=None)
Day (today_index-lookback_days)th to Day (today_index-1)th is the baseline time frame for training.
predict_days : int, optional (default=21)
Make prediction for Day (today_index)th to Day (today_index+predict_days)th.
outliers : a list of (datetime, int) tuple
The outliers we want to highlight on the plot.
"""
# retrieve the subplot in the generated Prophets matplotlib figure
ax = fig.get_axes()[0]
fig.autofmt_xdate(bottom=0.2, rotation=30, ha='right')
start = 0
# end = today_index + predict_days # Original code
end = df.shape[0]
x_pydatetime = df['ds'].dt.to_pydatetime()
# highlight the actual values of the entire time frame
ax.plot(x_pydatetime[start:end],
df.y[start:end],
color='orange', label='Actual')
# plot each outlier in red dot and annotate the date
for outlier in outliers:
ax.scatter(outlier[0], outlier[1], s=16, color='red', label='Anomaly')
# ax.text(outlier[0], outlier[1], str(outlier[0])[:10], color='red', fontsize=6)
# highlight baseline time frame with gray background
if lookback_days:
start = today_index - lookback_days
ax.axvspan(x_pydatetime[start],
x_pydatetime[today_index],
color=sns.xkcd_rgb['grey'],
alpha=0.2)
# annotate the areas, and position the text at the bottom 5% by using ymin
# + (ymax - ymin) / 20
ymin, ymax = ax.get_ylim()[0], ax.get_ylim()[1]
ax.text(x_pydatetime[int((start + today_index) / 2)],
ymin + (ymax - ymin) / 20, 'Baseline area')
ax.text(x_pydatetime[int((today_index * 2 + predict_days) / 2)],
ymin + (ymax - ymin) / 20, 'Prediction area')
# re-organize the legend
patch1 = mpatches.Patch(color='red', label='Anomaly')
patch2 = mpatches.Patch(color='orange', label='Actual')
patch3 = mpatches.Patch(color='skyblue', label='Predict and interval')
patch4 = mpatches.Patch(color='grey', label='Baseline area')
plt.legend(handles=[patch1, patch2, patch3, patch4])
# If you want to save the file
# folder = '/Users/guillaume/Documents/DS2020/XXXX/XXXX/figures/'
# name = 'Prediction_' + df.iloc[today_index, 0].strftime('%Y-%m-%d')
# filename = folder + name + '.png'
# plt.savefig(filename, bbox_inches="tight")
# plt.show() | 3,521 |
def verbose_traceroute_icmp(dest,
hops = DEFAULT_HOPS,
q = DEFAULT_QUERIES,
timeout = DEFAULT_TIMEOUT,
ttl = DEFAULT_MINTTL,
wait = DEFAULT_WAIT):
"""trace route to dest verbosely using ICMP"""
verbose_traceroute(traceroute_icmp, dest, hops, q, timeout, ttl,
wait) | 3,522 |
def mat2img(input_matrix, index_matrix):
"""
Transforms a batch of features of matrix images in a batch of features of vector images.
Args:
input_matrix (torch.Tensor): The images with shape (batch, features, matrix.size).
index_matrix (torch.Tensor): The index matrix for the images, shape(1, 1, matrix.size).
"""
logger = logging.getLogger(__name__ + '.mat2img')
logger.debug('input matrix shape : {}'.format(input_matrix.size()))
image_length = index_matrix[0, 0, torch.ge(index_matrix[0, 0], 0)].size(0)
logger.debug('new image length : {}'.format(image_length))
images = input_matrix.new_zeros((input_matrix.size(0), input_matrix.size(1), image_length), dtype=torch.float)
logger.debug('new images shape : {}'.format(images.size()))
for i in range(index_matrix.size(-2)): # iterate over the rows of index matrix
for j in range(index_matrix.size(-1)): # iterate over the cols of index matrix
if index_matrix[0, 0, i, j] != -1:
images[:, :, int(index_matrix[0, 0, i, j])] = input_matrix[:, :, i, j]
return images | 3,523 |
def _check_molecule_format(val):
"""If it seems to be zmatrix rather than xyz format we convert before returning"""
atoms = [x.strip() for x in val.split(";")]
if atoms is None or len(atoms) < 1: # pylint: disable=len-as-condition
raise QiskitNatureError("Molecule format error: " + val)
# An xyz format has 4 parts in each atom, if not then do zmatrix convert
# Allows dummy atoms, using symbol 'X' in zmatrix format for coord computation to xyz
parts = [x.strip() for x in atoms[0].split(" ")]
if len(parts) != 4:
try:
zmat = []
for atom in atoms:
parts = [x.strip() for x in atom.split(" ")]
z = [parts[0]]
for i in range(1, len(parts), 2):
z.append(int(parts[i]))
z.append(float(parts[i + 1]))
zmat.append(z)
xyz = z2xyz(zmat)
new_val = ""
for atm in xyz:
if atm[0].upper() == "X":
continue
if new_val:
new_val += "; "
new_val += f"{atm[0]} {atm[1]} {atm[2]} {atm[3]}"
return new_val
except Exception as exc:
raise QiskitNatureError("Failed to convert atom string: " + val) from exc
return val | 3,524 |
def measure_area_perimeter(mask):
"""A function that takes either a segmented image or perimeter
image as input, and calculates the length of the perimeter of a lesion."""
# Measure area: the sum of all white pixels in the mask image
area = np.sum(mask)
# Measure perimeter: first find which pixels belong to the perimeter.
perimeter = measure.perimeter(mask)
return area, perimeter | 3,525 |
def _click_up_params(user_email: str) -> dict:
"""
Load a Click Up parameters for this user.
Args:
user_email (str): Email of user making the request.
Returns:
(dict): A dict containing the elements:
'success': (Boolean) True if successful, otherwise False
'message': (str) Message to display to the user if not successful
"""
# Make sure the server's environment is set up properly.
param_names = ['CLICK_UP_BASE_URL', 'CLICK_UP_REDIRECT_PATH', 'CLICK_UP_AUTH_URL', 'CLICK_UP_CLIENT_ID', 'CLICK_UP_CLIENT_SECRET']
missing_params = []
for param in param_names:
if os.environ.get(param, None) is None:
missing_params.append(param)
if missing_params:
LOGGER.error(f"Missing Click Up environment variables: {missing_params}")
return {'success': False, 'message': 'Click Up environment is not configured. Check log file.'}
# See if the user is logged in to Click Up.
access_token = session.get('click_up_access_token')
LOGGER.debug(f"Click Up Access Token: {access_token}")
if access_token is None:
LOGGER.debug('User is not logged in to Click Up')
return _make_click_up_login()
url = os.environ.get('CLICK_UP_BASE_URL', None)
headers = {'Authorization': access_token}
# Get Team ID
result = requests.get(url + '/team', headers=headers)
data = result.json()
# See if we need to login in again.
ecode = data.get('ECODE', '')
if ecode in ['OAUTH_019', 'OAUTH_021', 'OAUTH_025', 'OAUTH_077']:
LOGGER.debug(f"User needs to login to Click Up. Again. ECODE={ecode}")
session['click_up_access_token'] = None
return _make_click_up_login()
target_team_name = _get_click_up_team_name(user_email)
team_id = None
for team in data.get('teams', []):
if team.get('name', '') == target_team_name:
team_id = team.get('id', None)
break
if team_id is None:
message = f"Could not find target team '{target_team_name}'"
LOGGER.debug(message)
return {'success': False, 'message': message}
else:
LOGGER.debug(f"Found target team '{target_team_name}' having ID {team_id}")
session['click_up_team_id'] = team_id
# Get Workspace ID
result = requests.get(url + '/team/' + team_id + '/space?archived=false', headers=headers)
data = result.json()
target_workspace_name = _get_click_up_workspace_name(user_email)
workspace_id = None
for workspace in data.get('spaces', []):
if workspace.get('name', '') == target_workspace_name:
workspace_id = workspace.get('id', None)
break
if workspace_id is None:
message = f"Could not find workspace team '{target_workspace_name}'"
LOGGER.debug(message)
return {'success': False, 'message': message}
else:
LOGGER.debug(f"Found target workspace '{target_workspace_name}' having ID {workspace_id}")
session['click_up_workspace_id'] = workspace_id
# LOGGER.debug(json.dumps(data, indent=4))
return {'success': True, 'message': 'Team ID and Workspace ID have been located and bookmarked'} | 3,526 |
def iset_servo_angle(angle_idx):
"""Set the servomotor angle to -90, -45, 0, 45 or 90 degrees.
"""
vals = [5, 9, 13, 17, 21]
servo.start(vals[angle_idx])
time.sleep(1.5)
servo.start(0) | 3,527 |
def mock_object(**params: Any) -> "Mock": # type: ignore # noqa
"""creates an object using params to set attributes
>>> option = mock_object(verbose=False, index=list(range(5)))
>>> option.verbose
False
>>> option.index
[0, 1, 2, 3, 4]
"""
return type("Mock", (), params)() | 3,528 |
def get_words(message):
"""Get the normalized list of words from a message string.
This function splits a message into words, normalizes them, and returns
the resulting list. Words are split on whitespace, converted to lowercase,
filtered against the stop-word list, and stemmed.
Args:
message: A string containing an SMS message
Returns:
The list of normalized words from the message.
"""
words = message.strip().split()
norm_words = [word.lower() for word in words]
# apply stop words
nonstop_words = [word for word in norm_words if not word in stop_words]
# apply stemming
stem_words = [ps.stem(word) for word in nonstop_words]
return stem_words | 3,529 |
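# get_words() above relies on module-level `stop_words` and `ps`; a minimal sketch
# of how they might be defined with NLTK (an assumption, since the source does not
# show these globals). Requires nltk.download('stopwords') beforehand.
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
stop_words = set(stopwords.words("english"))
ps = PorterStemmer()
print(get_words("you are the WINNER of a FREE prize"))
# e.g. ['winner', 'free', 'prize'] after stop-word removal and stemming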
def get_atomic_forces_card(name, **kwargs):
"""
Convert XML data to ATOMIC_FORCES card
:param name: Card name
:param kwargs: Dictionary with converted data from XML file
:return: List of strings
"""
try:
external_atomic_forces = kwargs['external_atomic_forces']
except KeyError:
logger.debug("Missing required arguments when building ATOMIC_FORCES card!")
return []
if len(external_atomic_forces) == 0:
return []
# Warn if the number of atoms in atomic positions differs from the number of forces
# kwargs.get never raises KeyError, so fall back to crystal positions explicitly
atomic_positions = kwargs.get('atomic_positions', kwargs.get('crystal_positions', {}))
atoms = atomic_positions.get('atom', [])
if atoms and len(atoms) != len(external_atomic_forces):
logger.error("incorrect number of atomic forces")
# Build input card text lines
lines = [name]
for forces in external_atomic_forces:
lines.append(' {0}'.format(' '.join([str(value) for value in forces])))
return lines | 3,530 |
def hasPathSum(self, root, sum):
"""
:type root: TreeNode
:type sum: int
:rtype: bool
"""
if root is None:
return False
if sum - root.val == 0 and root.left is None and root.right is None:
return True
else:
return self.hasPathSum(root.left, sum - root.val) or self.hasPathSum(root.right, sum - root.val) | 3,531 |
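# Small sanity check for hasPathSum above; TreeNode and Solution are assumed
# helpers (the usual LeetCode-style definitions), not shown in the source.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

root = TreeNode(5, TreeNode(4, TreeNode(11, TreeNode(7), TreeNode(2))), TreeNode(8))
# Solution().hasPathSum(root, 22) -> True, via the root-to-leaf path 5 -> 4 -> 11 -> 2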
def _(node: IntJoin, ctx: AnnotateContext) -> BoxType:
"""All references available on either side of the Join nodes are available."""
lt = box_type(node.over)
rt = box_type(node.joinee)
t = union(lt, rt)
node.typ = t
return t | 3,532 |
def detect_global_table_updates(record):
"""This will detect DDB Global Table updates that are not relevant to application data updates. These need to be
skipped over as they are pure noise.
:param record:
:return:
"""
# This only affects MODIFY events.
if record['eventName'] == 'MODIFY':
# Need to compare the old and new images to check for GT specific changes only (just pop off the GT fields)
old_image = remove_global_dynamo_specific_fields(record['dynamodb']['OldImage'])
new_image = remove_global_dynamo_specific_fields(record['dynamodb']['NewImage'])
if json.dumps(old_image, sort_keys=True) == json.dumps(new_image, sort_keys=True):
return True
return False | 3,533 |
def fix_telecined_fades(clip: vs.VideoNode, tff: bool | int | None = None,
thr: float = 2.2) -> vs.VideoNode:
"""
A filter that gives a mathematically perfect solution to fades made *after* telecining
(which made perfect IVTC impossible). This is an improved version of the Fix-Telecined-Fades plugin
that deals with overshoot/undershoot by adding a check.
Make sure to run this *after* IVTC/deinterlacing!
If the value surpasses thr * original value, it will not affect any pixels in that frame
to avoid it damaging frames it shouldn't need to. This helps a lot with orphan fields as well,
which would otherwise create massive swings in values, sometimes messing up the fade fixing.
If you pass your own float clip, you'll want to make sure to properly dither it down after.
If you don't do this, you'll run into some serious issues!
Taken from this gist and modified by LightArrowsEXE.
<https://gist.github.com/blackpilling/bf22846bfaa870a57ad77925c3524eb1>
:param clip: Input clip
:param tff: Top-field-first. `False` sets it to Bottom-Field-First.
If None, get the field order from the _FieldBased prop.
:param thr: Threshold for when a field should be adjusted.
Default is 2.2, which appears to be a safe value that doesn't
cause it to do weird stuff with orphan fields.
:return: Clip with only fades fixed
"""
def _ftf(n: int, f: List[vs.VideoFrame]) -> vs.VideoNode:
avg = (get_prop(f[0], 'PlaneStatsAverage', float),
get_prop(f[1], 'PlaneStatsAverage', float))
if avg[0] != avg[1]:
mean = sum(avg) / 2
fixed = (sep[0].std.Expr(f"x {mean} {avg[0]} / dup {thr} <= swap 1 ? *"),
sep[1].std.Expr(f"x {mean} {avg[1]} / *"))
else:
fixed = sep # type: ignore
return core.std.Interleave(fixed).std.DoubleWeave()[::2]
# I want to catch this before it reaches SeparateFields and give newer users a more useful error
if get_prop(clip.get_frame(0), '_FieldBased', int) == 0 and tff is None:
raise vs.Error("fix_telecined_fades: 'You must set `tff` for this clip!'")
elif isinstance(tff, (bool, int)):
clip = clip.std.SetFieldBased(int(tff) + 1)
clip32 = depth(clip, 32).std.Limiter()
bits = get_depth(clip)
sep = clip32.std.SeparateFields().std.PlaneStats()
sep = sep[::2], sep[1::2] # type: ignore # I know this isn't good, but frameeval breaks otherwise
ftf = core.std.FrameEval(clip32, _ftf, sep) # and I don't know how or why
if bits == 32:
warnings.warn("fix_telecined_fades: 'Make sure to dither down BEFORE setting the FieldBased prop to 0! "
"Not doing this MAY return some of the combing!'")
else:
ftf = depth(ftf, bits, dither_type=Dither.ERROR_DIFFUSION)
ftf = ftf.std.SetFieldBased(0)
return ftf | 3,534 |
def compute_statistics(provider_slug, tile_grid=get_default_tile_grid(), filename=None):
"""
:param provider_slug: Slug of the data provider whose completed export tasks are sampled
:param tile_grid: Calculate statistics for each tile in the tile grid
:param filename: Serializes the intermediate data-sample data so it can be shared btw different deployments
:return: A dict with statistics including area, duration, and package size per sq. kilometer
"""
max_estimate_export_task_records = os.getenv("MAX_ESTIMATE_EXPORT_TASK_RECORDS", 10000)
# Order by time descending to ensure more recent samples are collected first
export_task_records: QuerySet[ExportTaskRecord] = (
ExportTaskRecord.objects.filter(
export_provider_task__provider__slug=provider_slug,
status=TaskState.SUCCESS.value,
export_provider_task__status=TaskState.COMPLETED.value,
result__isnull=False,
# Only use results larger than a MB,
# anything less is likely a failure or a test.
result__size__gt=1,
)
.order_by("-finished_at")
.select_related("result", "export_provider_task__run__job", "export_provider_task__provider")
.all()[: int(max_estimate_export_task_records)]
)
processed_runs: Dict[str, Any] = {}
processed_dptr: Dict[str, Any] = {}
export_task_count = 0
total_count = len(export_task_records) # This should be the first and only DB hit.
all_stats: Dict[str, Any] = {}
logger.debug("Prefetching geometry data from all Jobs")
logger.info(f"Beginning collection of statistics for {total_count} {provider_slug} ExportTaskRecords")
runs: List[ExportRun] = list(
set([export_task_record.export_provider_task.run for export_task_record in export_task_records])
)
data_provider_task_records: List[DataProviderTaskRecord] = list(
set([export_task_record.export_provider_task for export_task_record in export_task_records])
)
default_stat = get_default_stat()
accessors = get_accessors()
global_stats = get_child_entry(all_stats, global_key, default_stat)
for run in runs:
area = get_geometry_description(run.job.the_geom)["area"]
collect_samples(run, [global_stats], ["duration", "area"], accessors, area)
for data_provider_task_record in data_provider_task_records:
area = get_geometry_description(data_provider_task_record.run.job.the_geom)["area"]
provider_stats = get_child_entry(all_stats, data_provider_task_record.provider.slug, default_stat)
collect_samples(data_provider_task_record, [provider_stats], ["duration", "area"], accessors, area)
collected_stats = collect_samples_for_export_task_records(export_task_records, copy.deepcopy(all_stats), tile_grid)
[all_stats.update(stat) for stat in collected_stats]
logger.info(
f"Computing statistics across {export_task_count} completed "
f"{provider_slug} ExportTaskRecords (geom_cache_misses={_dbg_geom_cache_misses})"
)
# TODO: Merge in any auxiliary sample data?
if filename is not None:
all_stats["timestamp"] = str(datetime.datetime.now())
with open(filename, "w") as file:
json.dump(all_stats, file)
totals: Dict[str, Union[int, dict]] = {
"run_count": len(processed_runs),
"data_provider_task_count": len(processed_dptr),
"export_task_count": export_task_count,
}
returned_totals = process_totals_concurrently(list(all_stats.keys()), copy.deepcopy(all_stats))
[totals.update(total) for total in returned_totals]
tile_count = sum([provider.get("tile_count", 0) for slug, provider in totals.items() if isinstance(provider, dict)])
logger.info("Generated statistics for %d tiles for group %s", tile_count, provider_slug)
return totals | 3,535 |
def compare_rdf(expected: Union[Graph, str], actual: Union[Graph, str], fmt: Optional[str] = "turtle") -> Optional[str]:
"""
Compare expected to actual, returning a string if there is a difference
:param expected: expected RDF. Can be Graph, file name, uri or text
:param actual: actual RDF. Can be Graph, file name, uri or text
:param fmt: RDF format
:return: None if they match else summary of difference
"""
def rem_metadata(g: Graph) -> IsomorphicGraph:
# Remove list declarations from target
for s in g.subjects(RDF.type, RDF.List):
g.remove((s, RDF.type, RDF.List))
for t in g:
if t[1] in (META.generation_date, META.source_file_date, META.source_file_size,
TYPE.generation_date, TYPE.source_file_date, TYPE.source_file_size):
g.remove(t)
g_iso = to_isomorphic(g)
return g_iso
# Bypass compare if settings have turned it off
if SKIP_RDF_COMPARE:
print(f"tests/utils/compare_rdf.py: {SKIP_RDF_COMPARE_REASON}")
return None
expected_graph = to_graph(expected, fmt)
expected_isomorphic = rem_metadata(expected_graph)
actual_graph = to_graph(actual, fmt)
actual_isomorphic = rem_metadata(actual_graph)
# Graph compare takes a Looong time
in_both, in_old, in_new = graph_diff(expected_isomorphic, actual_isomorphic)
# if old_iso != new_iso:
# in_both, in_old, in_new = graph_diff(old_iso, new_iso)
old_len = len(list(in_old))
new_len = len(list(in_new))
if old_len or new_len:
txt = StringIO()
with redirect_stdout(txt):
print("----- Missing Triples -----")
if old_len:
print_triples(in_old)
print("----- Added Triples -----")
if new_len:
print_triples(in_new)
return txt.getvalue()
return None | 3,536 |
def remove_empty_segments(args):
"""Removes empty segments from corpus component files.
"""
logging.info("Removing empty segments from corpus.")
corpus_iterator = CorpusIterator(args.corpus_dir, args.root_file)
for file_path in corpus_iterator.iter_annotated_files():
file_name = str(file_path)
remove_empty_segments_from_file(file_name)
file_name = corpus_iterator.get_component_file_name(file_path)
remove_empty_segments_from_file(str(file_name)) | 3,537 |
def start_engine(engine_name, tk, context):
"""
Creates an engine and makes it the current engine.
Returns the newly created engine object. Example::
>>> import sgtk
>>> tk = sgtk.sgtk_from_path("/studio/project_root")
>>> ctx = tk.context_empty()
>>> engine = sgtk.platform.start_engine('tk-maya', tk, ctx)
>>> engine
<Sgtk Engine 0x10451b690: tk-maya, env: shotgun>
:param engine_name: Name of the engine to launch, e.g. tk-maya
:param tk: :class:`~sgtk.Sgtk` instance to associate the engine with
:param context: :class:`~sgtk.Context` object of the context to launch the engine for.
:returns: :class:`Engine` instance
:raises: :class:`TankEngineInitError` if an engine could not be started
for the passed context.
"""
return _start_engine(engine_name, tk, None, context) | 3,538 |
def test_datasource():
"""Tests that default dataset on datasource is teams'"""
ds = GithubDataSource(name='mah_ds', domain='test_domain', organization='foorganization')
assert ds.dataset == 'teams' | 3,539 |
def parse_vaulttext(b_vaulttext):
"""Parse the vaulttext.
Args:
b_vaulttext: A byte str containing the vaulttext (ciphertext, salt,
crypted_hmac).
Returns:
A tuple of byte str of the ciphertext suitable for passing to a Cipher
class's decrypt() function, a byte str of the salt, and a byte str of the
crypted_hmac.
Raises:
AnsibleVaultFormatError: If the vaulttext format is invalid.
"""
# SPLIT SALT, DIGEST, AND DATA
try:
return _parse_vaulttext(b_vaulttext)
except AnsibleVaultFormatError:
raise
except Exception as exc:
raise AnsibleVaultFormatError(f'Vault vaulttext format error: {exc}') | 3,540 |
def does_column_exist_in_db(db, table_name, col_name):
"""Checks if a specific col exists"""
col_name = col_name.lower()
query = f"pragma table_info('{table_name}');"
all_rows = []
try:
db.row_factory = sqlite3.Row # For fetching columns by name
cursor = db.cursor()
cursor.execute(query)
all_rows = cursor.fetchall()
except sqlite3.Error as ex:
print(f'Query error, query={query} Error={ex}')
for row in all_rows:
if row['name'].lower() == col_name:
return True
return False | 3,541 |
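# does_column_exist_in_db() above can be exercised with an in-memory database;
# the table and column names here are arbitrary examples.
import sqlite3
db = sqlite3.connect(":memory:")
db.execute("CREATE TABLE users (id INTEGER, email TEXT)")
assert does_column_exist_in_db(db, "users", "Email") is True   # match is case-insensitive
assert does_column_exist_in_db(db, "users", "phone") is False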
def check_min_time(stats, timepiece):
"""检查是否产生了新的最少用时"""
if stats.time < stats.min_time:
stats.min_time = stats.time
timepiece.prep_min_time() | 3,542 |
def make_secure_val(val):
"""Takes hashed pw and adds salt; this will be the cookie"""
return '%s|%s' % (val, hmac.new(secret, val).hexdigest()) | 3,543 |
def get_ad_contents(queryset):
"""
Takes a Contents queryset and returns one random contents object that has a preview video.
:param queryset: Contents queryset
:return: contents object
"""
contents_list = queryset.filter(preview_video__isnull=False)
max_int = contents_list.count() - 1
if max_int < 0:
return
while True:
idx = random.randint(0, max_int)
contents = contents_list[idx]
if contents:
return contents | 3,544 |
def Image_CanRead(*args, **kwargs):
"""
Image_CanRead(String filename) -> bool
Returns True if the image handlers can read this file.
"""
return _core_.Image_CanRead(*args, **kwargs) | 3,545 |
def create_without_source_table_privilege(self, node=None):
"""Check that user is unable to create a table without select
privilege on the source table.
"""
user_name = f"user_{getuid()}"
table_name = f"table_{getuid()}"
source_table_name = f"source_table_{getuid()}"
exitcode, message = errors.not_enough_privileges(name=f"{user_name}")
if node is None:
node = self.context.node
with table(node, f"{source_table_name}"):
with user(node, f"{user_name}"):
try:
with When("I grant CREATE TABLE privilege to a user"):
node.query(f"GRANT CREATE TABLE ON {table_name} TO {user_name}")
with And("I grant INSERT privilege"):
node.query(f"GRANT INSERT ON {table_name} TO {user_name}")
with Then("I try to create a table without select privilege on the table"):
node.query(f"CREATE TABLE {table_name} ENGINE = Memory AS SELECT * FROM {source_table_name}", settings = [("user", f"{user_name}")],
exitcode=exitcode, message=message)
finally:
with Finally("I drop the table"):
node.query(f"DROP TABLE IF EXISTS {table_name}") | 3,546 |
def flatten(lst):
"""Flatten a list."""
return [y for l in lst for y in flatten(l)] if isinstance(lst, (list, np.ndarray)) else [lst] | 3,547 |
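# flatten() above recursively flattens arbitrarily nested lists (and numpy arrays),
# assuming numpy is imported as np in the defining module:
assert flatten([1, [2, [3, 4]], 5]) == [1, 2, 3, 4, 5]
assert flatten(7) == [7]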
def calc_mean_score(movies):
"""Helper method to calculate mean of list of Movie namedtuples,
round the mean to 1 decimal place"""
ratings = [m.score for m in movies]
mean = sum(ratings) / max(1, len(ratings))
return round(mean, 1) | 3,548 |
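# calc_mean_score() above works on any objects exposing a `score` attribute;
# a namedtuple Movie is assumed here for illustration.
from collections import namedtuple
Movie = namedtuple("Movie", "title score")
movies = [Movie("M1", 8.75), Movie("M2", 7.1), Movie("M3", 9.0)]
assert calc_mean_score(movies) == 8.3
assert calc_mean_score([]) == 0.0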
def srun(hosts, cmd, srun_params=None):
"""Run srun cmd on slurm partition.
Args:
hosts (str): hosts to allocate
cmd (str): cmdline to execute
srun_params(dict): additional params for srun
Returns:
CmdResult: object containing the result (exit status, stdout, etc.) of
the srun command
"""
cmd = srun_str(hosts, cmd, srun_params)
try:
result = run_command(cmd, timeout=30)
except DaosTestError as error:
raise SlurmFailed("srun failed : {}".format(error)) from error
return result | 3,549 |
def COSclustering(key, emb, oracle_num_speakers=None, max_num_speaker=8, MIN_SAMPLES=6):
"""
input:
key (str): speaker uniq name
emb (np array): speaker embedding
oracle_num_speakers (int or None): oracle number of speakers if known else None
max_num_speaker (int): maximum number of clusters to consider for each session
MIN_SAMPLES (int): minimum number of samples required for NME clustering, this avoids
zero p_neighbour_lists. Default of 6 is selected since (1/rp_threshold) >= 4.
output:
Y (List[int]): speaker labels
"""
est_num_spks_out_list = []
mat = get_eigen_matrix(emb)
if oracle_num_speakers:
max_num_speaker = oracle_num_speakers
X_conn_spkcount, rp_thres_spkcount, est_num_of_spk, lambdas, p_neigh = NMEanalysis(mat, max_num_speaker)
if emb.shape[0] > MIN_SAMPLES:
X_conn_from_dist = get_X_conn_from_dist(mat, p_neigh)
else:
X_conn_from_dist = mat
if oracle_num_speakers:
est_num_of_spk = oracle_num_speakers
est_num_spks_out_list.append([key, str(est_num_of_spk)])
# Perform spectral clustering
spectral_model = sklearn_SpectralClustering(
affinity='precomputed',
eigen_solver='amg',
random_state=0,
n_jobs=-1,
n_clusters=est_num_of_spk,
eigen_tol=1e-10,
)
Y = spectral_model.fit_predict(X_conn_from_dist)
return Y | 3,550 |
def fill_linear_layer(layer, weight, bias):
"""Load weight and bias to a given layer from onnx format."""
with torch.no_grad():
layer.weight.data = torch.from_numpy(onnx.numpy_helper.to_array(weight))
if bias is not None:
layer.bias.data = torch.from_numpy(onnx.numpy_helper.to_array(bias)) | 3,551 |
def gauss_distance(sample_set, query_set, unlabeled_set=None):
""" (experimental) function to try different approaches to model prototypes as gaussians
Args:
sample_set: features extracted from the sample set
query_set: features extracted from the query set
unlabeled_set: features extracted from the unlabeled set
"""
b, n, k, c = sample_set.size()
sample_set_std = sample_set.std(2).view(b, 1, n, c)
sample_set_mean = sample_set.mean(2).view(b, 1, n, c)
query_set = query_set.view(b, n * k, 1, c)
d = (query_set - sample_set_mean) / sample_set_std
return -torch.sum(d ** 2, 3) / np.sqrt(c) | 3,552 |
def make_mps_left(mps,truncate_mbd=1e100,split_s=False):
"""
Put an mps into left canonical form
Args:
mps : list of mps tensors
The MPS stored as a list of mps tensors
Kwargs:
truncate_mbd : int
The maximum bond dimension to which the
mps should be truncated
Returns:
mps : list of mps tensors
The resulting left-canonicalized MPS
"""
# Figure out size of mps
N = len(mps)
# Loop forward over sites, moving the gauge to the right
for site in range(N-1):
#tmpprint('\t\t\t\tSite: {}'.format(site))
mps = move_gauge_right(mps,site,
truncate_mbd=truncate_mbd,
return_ent=False,
return_wgt=False,
split_s=split_s)
# Remove empty indices at the ends of the mps
mps = remove_empty_ends(mps)
# Return results
return mps | 3,553 |
def word_after(line, word):
"""'a black sheep', 'black' -> 'sheep'"""
return line.split(word, 1)[-1].split(' ', 1)[0] | 3,554 |
def domain_in_domain(subdomain, domain):
"""Returns try if subdomain is a sub-domain of domain.
subdomain
A *reversed* list of strings returned by :func:`split_domain`
domain
A *reversed* list of strings as returned by :func:`split_domain`
For example::
>>> domain_in_domain(['com', 'example'],
... ['com', 'example', 'www'])
True"""
if len(subdomain) <= len(domain):
i = 0
for d in subdomain:
if d != domain[i]:
return False
i += 1
return True
else:
return False | 3,555 |
def polygon_from_boundary(xs, ys, xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0, xtol=0.0):
"""Polygon within box left of boundary given by (xs, ys)
xs, ys: coordinates of boundary (ys ordered increasingly)
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
xs[xs > xmax-xtol] = xmax
xs[xs < xmin+xtol] = xmin
index = -1
while xs[index] == xmin:
index -= 1
if index < -2:
xs, ys = xs[:index+2], ys[:index+2]
vertices = list(zip(xs, ys))  # list() so that .append() below works in Python 3
if len(xs) == 1:
vertices.append((xs[0], ymax))
vertices.append((xmin, ymax))
elif xs[-1] >= xmax-xtol:
if xs[-1] < xmax:
vertices.append((xmax, ys[-1]))
if ys[-1] < ymax:
vertices.append((xmax, ymax))
vertices.append((xmin, ymax))
elif xs[-1] > xmin:
vertices.append((xmin, ys[-1]))
if (xs[0] > xmin) or (ys[0] > ymin):
vertices.append((xmin, ymin))
if ys[0] > ymin:
vertices.append((xs[0], ymin))
vertices = np.asarray(vertices)
return vertices | 3,556 |
def print_head(df_in):
"""Print head of data set
Parameters
----------
df_in : pd.DataFrame
Dataframe
Returns
-------
head of dataset, shape of dataset
"""
df = df_in.head()
display(df)
print('Dataset shape: ', df_in.shape) | 3,557 |
def type_conversion(self, node="clickhouse1"):
"""Check the type conversion operations with DateTime64.
Cast can be set as Requirement thereby as the module
tests exactly what CAST does.
"""
self.context.node = self.context.cluster.node(node)
for scenario in loads(current_module(), Scenario):
Scenario(run=scenario) | 3,558 |
def is_degenerate(op, tol=1e-12):
"""Check if operator has any degenerate eigenvalues, determined relative
to mean spacing of all eigenvalues.
Parameters
----------
op : operator or 1d-array
Operator or assumed eigenvalues to check degeneracy for.
tol : float
How much closer than evenly spaced the eigenvalue gap has to be
to count as degenerate.
Returns
-------
n_dgen : int
Number of degenerate eigenvalues.
"""
op = np.asarray(op)
if op.ndim != 1:
evals = eigvalsh(op)
else:
evals = op
l_gaps = evals[1:] - evals[:-1]
l_tol = tol * (evals[-1] - evals[0]) / op.shape[0]
return np.count_nonzero(abs(l_gaps) < l_tol) | 3,559 |
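# is_degenerate() above can be fed eigenvalues directly as a 1d array;
# here two equal eigenvalues produce exactly one degenerate gap.
import numpy as np
assert is_degenerate(np.array([0.0, 0.0, 1.0, 2.0])) == 1
assert is_degenerate(np.array([0.0, 1.0, 2.0])) == 0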
def _get_skip_props(mo, include_operational=False, version_filter=True):
"""
Internal function to skip mo property if not to be considered for sync.
"""
skip_props = []
for prop in mo.prop_meta:
mo_property_meta = mo.prop_meta[prop]
if mo_property_meta is None:
continue
# not include operational property
if not include_operational:
if mo_property_meta.access in (MoPropertyMeta.INTERNAL,
MoPropertyMeta.READ_ONLY):
skip_props.append(prop)
# checks if property is part of current or earlier ucsm schema
if version_filter:
version = mo.get_handle().version
if version is None or version < mo_property_meta.version or \
mo_property_meta.access == MoPropertyMeta.INTERNAL:
skip_props.append(prop)
return skip_props | 3,560 |
def generate_split_problem():
"""Generates a 'Split' problem configuration.
Returns (environment, robot, start configuration, goal configuration)."""
walls = [rectangle(0, 400, 0, 10), rectangle(0, 400, 290, 300),
rectangle(0, 10, 0, 300), rectangle(390, 400, 0, 300),
rectangle(180, 220, 100, 200)]
split_environment = Environment(walls)
robot_geometry = Polygon([(-15, -15), (-15, 15), (15, 15), (15, -15)])
robot = Robot(robot_geometry)
start = np.array([50, 150, 0])
goal = np.array([350, 150, 0])
return split_environment, robot, start, goal | 3,561 |
def problem_generator(difficulty=3):
"""
This function generates mathematical expressions as string. It is not very
smart and will generate expressions that have answers the lex function
cannot accept.
"""
operators = ["/", "*", "+", "-"]
numeric_lim = difficulty * 7
output = ""
for i in range(difficulty + 3):
if i % 2 == 0:
output += str(randint(1, numeric_lim)) + " "
else:
output += operators[randint(0, len(operators) - 1)] + " "
if output[len(output) - 2] in operators:
output += str(randint(1, numeric_lim))
return output | 3,562 |
async def receive_message_and_update_deployment(app: FastAPI) -> None:
"""
Receives messages from the deployment status update queue and updates the status for
the associated resource in the state store.
Args:
app ([FastAPI]): Handle to the currently running app
"""
receive_message_gen = receive_message()
try:
async for message in receive_message_gen:
workspace_repo = WorkspaceRepository(get_db_client(app))
result = update_status_in_database(workspace_repo, message)
await receive_message_gen.asend(result)
except StopAsyncIteration: # the async generator when finished signals end with this exception.
pass | 3,563 |
def logical_factory_dimensions(params: Parameters
) -> Tuple[int, int, float]:
"""Determine the width, height, depth of the magic state factory."""
if params.use_t_t_distillation:
return 12*2, 8*2, 6 # Four T2 factories
l1_distance = params.l1_distance
l2_distance = params.code_distance
t1_height = 4 * l1_distance / l2_distance
t1_width = 8 * l1_distance / l2_distance
t1_depth = 5.75 * l1_distance / l2_distance
ccz_depth = 5
ccz_height = 6
ccz_width = 3
storage_width = 2 * l1_distance / l2_distance
ccz_rate = 1 / ccz_depth
t1_rate = 1 / t1_depth
t1_factories = int(math.ceil((ccz_rate * 8) / t1_rate))
t1_factory_column_height = t1_height * math.ceil(t1_factories / 2)
width = int(math.ceil(t1_width * 2 + ccz_width + storage_width))
height = int(math.ceil(max(ccz_height, t1_factory_column_height)))
depth = max(ccz_depth, t1_depth)
return width, height, depth | 3,564 |
def parse_list_or_range(arg):
"""
Parses a string that represents either an integer or a range in
the notation ``<start>:<step>:<stop>``.
Parameters
----------
arg : :obj:`str`
Integer or range string.
Returns
-------
int or :obj:`list` of int
Raises
------
ArgumentTypeError
If input can neither be interpreted as an integer nor a valid range.
"""
if re.match(r'^\d+:\d+:\d+$', arg) or re.match(r'^\d+:\d+$', arg):
rng_params = list(map(int, arg.split(':')))
step = 1
if len(rng_params) == 2: # start, stop
start, stop = rng_params
else: # start, step, stop
start, step, stop = rng_params
rng = list(range(start, stop + 1, step)) # include last stop-element in range
if len(rng) == 0:
raise argparse.ArgumentTypeError('{0} is an empty range'.format(arg))
return rng
elif re.match(r'^\d+$', arg):
return int(arg)
raise argparse.ArgumentTypeError(
'{0} is neither a integer list, nor valid range in the form <start>:[<step>:]<stop>'.format(
arg
)
) | 3,565 |
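# parse_list_or_range() above accepts either a plain integer or a
# <start>:[<step>:]<stop> range string, with the stop value included:
assert parse_list_or_range("4") == 4
assert parse_list_or_range("1:3") == [1, 2, 3]
assert parse_list_or_range("1:2:9") == [1, 3, 5, 7, 9]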
def data():
"""
Data providing function:
This function is separated from create_model() so that hyperopt
won't reload data for each evaluation run.
"""
d_file = 'data/zinc_100k.h5'
data_train, data_test, props_train, props_test, tokens = utils.load_dataset(d_file, "TRANSFORMER", True)
x_train = [data_train, data_train, props_train]
y_train = None
x_test = [data_test, data_test, props_test]
y_test = None
return x_train, y_train, x_test, y_test | 3,566 |
def make_commands(
script: str,
base_args: Optional[Dict[str, Any]] = None,
common_hyper_args: Optional[Dict[str, List[Any]]] = None,
algorithm_hyper_args: Optional[Dict[str, List[Any]]] = None,
) -> List[str]:
"""Generate command to run.
It will generate a list of commands to be use with the runners.
Each command will look like:
python script --base_arg_key --base_arg_val
--common_hyper_key --common_hyper_key
--algorithm_hyper_key --algorithm_hyper_key
--mutually_exclusive_args
where a separate command is generated for each common hyper_parameter and
algorithm_hyper_parameter
Parameters
----------
script: str.
String with script to run.
base_args: dict
Base arguments to execute.
common_hyper_args: dict
Iterable hyper parameters to execute in different runs.
algorithm_hyper_args
Algorithm dependent hyper parameters to execute.
Returns
-------
commands: List[str]
List with commands to execute.
"""
interpreter_script = sys.executable
base_cmd = interpreter_script + " " + script
commands = [] # List[str]
if common_hyper_args is None:
common_hyper_args = dict() # pragma: no cover
common_hyper_args = common_hyper_args.copy()
if algorithm_hyper_args is not None:
common_hyper_args.update(algorithm_hyper_args)
hyper_args_list = list(
dict(zip(common_hyper_args, x))
for x in itertools.product(*common_hyper_args.values())
)
for hyper_args in hyper_args_list:
cmd = base_cmd
for dict_ in [base_args, hyper_args]:
if dict_ is None:
continue
for key, value in dict_.items():
cmd += get_command(key, value)
commands.append(cmd)
return commands | 3,567 |
def dummy_lockfile_path(
dummy_lockfile_proto: lockfile_pb2.LockFile,
) -> pathlib.Path:
"""Yield a path to a lockfile proto."""
with tempfile.TemporaryDirectory() as d:
pbutil.ToFile(dummy_lockfile_proto, pathlib.Path(d) / "LOCK.pbtxt")
yield pathlib.Path(d) / "LOCK.pbtxt" | 3,568 |
def mongos_program(logger, job_num, executable=None, process_kwargs=None, mongos_options=None): # pylint: disable=too-many-arguments
"""Return a Process instance that starts a mongos with arguments constructed from 'kwargs'."""
args = [executable]
mongos_options = mongos_options.copy()
if "port" not in mongos_options:
mongos_options["port"] = network.PortAllocator.next_fixture_port(job_num)
suite_set_parameters = mongos_options.get("set_parameters", {})
_apply_set_parameters(args, suite_set_parameters)
mongos_options.pop("set_parameters")
# Apply the rest of the command line arguments.
_apply_kwargs(args, mongos_options)
_set_keyfile_permissions(mongos_options)
process_kwargs = make_historic(utils.default_if_none(process_kwargs, {}))
return make_process(logger, args, **process_kwargs), mongos_options["port"] | 3,569 |
def markerBeings():
"""标记众生区块
Content-Type: application/json
{
"token":"",
"block_id":""
}
Returns json
{
"is_success":bool,
"data":
"""
try:
info = request.get_json()
# verify the token
token = info["token"]
if not auth.verifyToken(token):
http_message = HttpMessage(is_success=False, data="Invalid token")
return http_message.getJson()
# get the block id
beings_block_id = info["block_id"]
if blockOfGarbage.addGarbageBlockQueue(beings_block_id):
http_message = HttpMessage(is_success=True, data="Marked successfully")
return http_message.getJson()
else:
http_message = HttpMessage(is_success=False, data="This block has already been marked")
return http_message.getJson()
except Exception as err:
print(err)
http_message = HttpMessage(is_success=False, data="Invalid parameters")
return http_message.getJson() | 3,570 |
def get_charges_with_openff(mol):
"""Starting from a openff molecule returns atomic charges
If the charges are already defined will return them without
change
I not will calculate am1bcc charges
Parameters
------------
mol : openff.toolkit.topology.Molecule
Examples
---------
from openff.toolkit.topology import Molecule
mol = Molecule.from_file(SOME_FILE)
# mol = Molecule.from_smiles(SMILES)
get_charges_with_openff(mol)
Returns
------------
np.array(float)
charges in atomic units (elementary charge)
Notes
----------
Some extra conformers may be generated because of
https://github.com/openforcefield/openff-toolkit/issues/492
"""
if (mol.partial_charges is None) or (np.allclose(
mol.partial_charges / unit.elementary_charge,
np.zeros([mol.n_particles]))):
# NOTE: generate_conformers seems to be required for some molecules
# https://github.com/openforcefield/openff-toolkit/issues/492
mol.generate_conformers(n_conformers=10)
mol.compute_partial_charges_am1bcc()
return mol.partial_charges.value_in_unit(unit.elementary_charge) | 3,571 |
def parse_mdout(file):
"""
    Return energies from an AMBER ``mdout`` file.
Parameters
----------
file : os.PathLike
Name of Amber output file
Returns
-------
energies : dict
A dictionary containing VDW, electrostatic, bond, angle, dihedral, V14, E14, and total energy.
"""
vdw, ele, bnd, ang, dih, v14, e14 = [], [], [], [], [], [], []
restraint = []
with open(file, "r") as f:
for line in f.readlines():
words = line.rstrip().split()
if len(words) > 1:
if "BOND" in words[0]:
bnd.append(float(words[2]))
ang.append(float(words[5]))
dih.append(float(words[8]))
if "VDWAALS" in words[0]:
vdw.append(float(words[2]))
ele.append(float(words[5]))
if "1-4" in words[0]:
v14.append(float(words[3]))
e14.append(float(words[7]))
restraint.append(float(words[10]))
energies = {
"Bond": bnd,
"Angle": ang,
"Dihedral": dih,
"V14": v14,
"E14": e14,
"VDW": vdw,
"Ele": ele,
"Restraint": restraint,
"Total": [sum(x) for x in zip(bnd, ang, dih, v14, e14, vdw, ele)],
}
return energies | 3,572 |
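# Illustration (not part of the original source): a hedged usage sketch,
# assuming an Amber output file named "mdout" exists in the working directory.
energies = parse_mdout("mdout")
print(energies["Total"][-1])        # total energy at the last reported step
print(sum(energies["Restraint"]))   # accumulated restraint energy over the run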
def import_face_recognition():
""" Import the face_recognition module only when it is required """
global face_recognition
if face_recognition is None:
import face_recognition | 3,573 |
def aa_spectrum(
G: nx.Graph, aggregation_type: Optional[List[str]] = None
) -> nx.Graph:
"""
Calculate the spectrum descriptors of 3-mers for a given protein. Contains the composition values of 8000 3-mers
:param G: Protein Graph to featurise
:type G: nx.Graph
:param aggregation_type: Aggregation types to use over chains
:type aggregation_type: List[Optional[str]]
:return: Protein Graph with aa_spectrum feature added. G.graph["aa_spectrum_{chain | aggregation_type}"]
:rtype: nx.Graph
"""
from propy.AAComposition import GetSpectrumDict
func = GetSpectrumDict
feature_name = "aa_spectrum"
return compute_propy_feature(
G,
func=func,
feature_name=feature_name,
aggregation_type=aggregation_type,
) | 3,574 |
def dumppickle(obj, fname, protocol=-1):
"""
Pickle object `obj` to file `fname`.
"""
with open(fname, 'wb') as fout: # 'b' for binary, needed on Windows
pickle.dump(obj, fout, protocol=protocol) | 3,575 |
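# Illustration (not part of the original source): a matching loader and a
# round-trip check; `loadpickle` is a hypothetical counterpart, not part of
# the library above.
import pickle

def loadpickle(fname):
    """Load a pickled object from file `fname`."""
    with open(fname, 'rb') as fin:
        return pickle.load(fin)

dumppickle({"a": 1, "b": [2, 3]}, "obj.pkl")
assert loadpickle("obj.pkl") == {"a": 1, "b": [2, 3]}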
def get_click_data(api, campaign_id):
"""Return a list of all clicks for a given campaign."""
rawEvents = api.campaigns.get(campaign_id).as_dict()["timeline"]
    clicks = list()  # Holds list of all click records.
for rawEvent in rawEvents:
if rawEvent["message"] == "Clicked Link":
click = dict()
# Builds out click document.
click["user"] = hashlib.sha256(
rawEvent["email"].encode("utf-8")
).hexdigest()
click["source_ip"] = rawEvent["details"]["browser"]["address"]
click["time"] = rawEvent["time"]
click["application"] = get_application(rawEvent)
clicks.append(click)
return clicks | 3,576 |
def test_line_2(style_checker):
"""style_checker on python file with No_Style_Check comment on line 2.
The file has PEP8 errors, but those should be ignored thanks
to the No_Style_Check comment.
"""
p = style_checker.run_style_checker('/trunk/module', 'src/line_2.py')
style_checker.assertEqual(p.status, 0, p.image)
style_checker.assertRunOutputEmpty(p) | 3,577 |
def multi_label_column_to_binary_columns(data_frame: pd.DataFrame, column: str):
"""
    assuming that the column contains comma-separated label strings,
    returns a new dataframe with one indicator column per distinct label,
    marking the presence of that label in each row.
:data_frame: the pandas DataFrame
:column: the column with array values
:return: a new DataFrame with binary columns
"""
label_unique_values = data_frame[column].str.replace(
"'", '').str.split(',').explode().to_frame()
drop_identical_values = label_unique_values[column].drop_duplicates(
keep="first").tolist()
multi_label_data_frame = pd.concat([data_frame,
pd.crosstab(label_unique_values.index,
label_unique_values[column])[drop_identical_values]], axis=1)
return multi_label_data_frame | 3,578 |
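# Illustration (not part of the original source): a small worked example,
# assuming the column holds comma-separated, quoted label strings.
import pandas as pd

df = pd.DataFrame({
    "title": ["post 1", "post 2", "post 3"],
    "tags": ["'python','pandas'", "'pandas'", "'python','numpy'"],
})
wide = multi_label_column_to_binary_columns(df, "tags")
# `wide` keeps the original columns plus one indicator column per distinct tag
# ('python', 'pandas', 'numpy'), marking which tags occur in each row.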
def subrepositories_changed(all_if_master: bool = False) -> List[str]: # pragma: no cover
"""
Returns a list of the final name components of subrepositories that contain files that are different between the
master branch and the current branch. Subrepositories are defined as the directories immediately under "projects"
and "libraries".
Example: if libraries/ABEX/foo/bar.py and projects/CellSignalling/bar/baz.py have changed, the result returned
would be ["ABEX", "CellSignalling"].
If the current branch *is* master, then all subrepository names (if all_if_master) or an empty list, is returned.
"master" is tried as the name of the master branch, followed by "main" if that branch does not exist. If
neither is found, which may be the case during an ADO build, we look at .git/FETCH_HEAD, which may show
evidence of the master branch having been fetched, and if so will tell us its commit ID.
"""
all_subrepos: Set[str] = set()
for path in SUBREPOSITORY_PARENT_PATHS:
for subrepo in path.glob("*"):
if subrepo.is_dir():
all_subrepos.add(subrepo.name)
repo = Repo(".")
master_branch_name = None
for branch in repo.branches:
if branch.name in ["master", "main"]:
master_branch_name = branch.name
break
if master_branch_name is None:
fh_path = Path(".git") / "FETCH_HEAD"
if fh_path.exists():
with fh_path.open() as fh:
for line in fh.readlines():
if line.find("'master'") > 0 or line.find("'main'") > 0:
# master_branch_name is actually a commit in this case
master_branch_name = line.split(None, 1)[0]
sys.stderr.write(f"Setting master 'branch' name to commit {master_branch_name}\n")
break
if master_branch_name is None:
# Play safe: master branch not found, so assume all subrepos might have changed.
sys.stderr.write("WARNING: could not find either a 'master' branch or a 'main' branch.\n")
changed = all_subrepos
else:
changed = set()
for diff in repo.index.diff(master_branch_name):
for path in [Path(diff.a_path), Path(diff.b_path)]:
parts = path.parts
if (
len(parts) >= 2
and Path(parts[0]) in SUBREPOSITORY_PARENT_PATHS
and parts[1] in all_subrepos
and parts[-1] not in BASENAMES_TO_IGNORE
):
changed.add(parts[1])
if changed:
changed = add_subrepositories_depending_on(changed, all_subrepos, get_internal_requirements)
elif all_if_master and current_commit_is_master(repo, master_branch_name):
changed = all_subrepos
# Remove subrepositories that appear to be submodules
apparent_submodules = set(path.parent.name for path in Path(".").glob("*/*/.git"))
result = [name for name in sorted(changed) if name not in apparent_submodules]
return result | 3,579 |
def show_landscape(adata,
Xgrid,
Ygrid,
Zgrid,
basis="umap",
save_show_or_return='show',
save_kwargs={},
):
"""Plot the quasi-potential landscape.
Parameters
----------
adata: :class:`~anndata.AnnData`
AnnData object that contains Xgrid, Ygrid and Zgrid data for visualizing potential landscape.
Xgrid: `numpy.ndarray`
x-coordinates of the Grid produced from the meshgrid function.
Ygrid: `numpy.ndarray`
y-coordinates of the Grid produced from the meshgrid function.
Zgrid: `numpy.ndarray`
z-coordinates or potential at each of the x/y coordinate.
basis: `str` (default: umap)
        The method of dimension reduction. By default it is umap. Currently it is not checked against Xgrid and Ygrid.
save_show_or_return: {'show', 'save', 'return'} (default: `show`)
Whether to save, show or return the figure.
save_kwargs: `dict` (default: `{}`)
        A dictionary that will be passed to the save_fig function. By default it is an empty dictionary and the save_fig function
will use the {"path": None, "prefix": 'show_landscape', "dpi": None, "ext": 'pdf', "transparent": True, "close":
True, "verbose": True} as its parameters. Otherwise you can provide a dictionary that properly modify those keys
according to your needs.
Returns
-------
A 3D plot showing the quasi-potential of each cell state.
"""
if "grid_Pot_" + basis in adata.uns.keys():
Xgrid_, Ygrid_, Zgrid_ = (
adata.uns["grid_Pot_" + basis]["Xgrid"],
adata.uns["grid_Pot_" + basis]["Ygrid"],
adata.uns["grid_Pot_" + basis]["Zgrid"],
)
Xgrid = Xgrid_ if Xgrid is None else Xgrid
Ygrid = Ygrid_ if Ygrid is None else Ygrid
Zgrid = Zgrid_ if Zgrid is None else Zgrid
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from matplotlib.colors import LightSource
fig = plt.figure()
ax = fig.gca(projection="3d")
# Plot the surface.
ls = LightSource(azdeg=0, altdeg=65)
# Shade data, creating an rgb array.
rgb = ls.shade(Zgrid, plt.cm.RdYlBu)
surf = ax.plot_surface(
Xgrid,
Ygrid,
Zgrid,
cmap=cm.coolwarm,
rstride=1,
cstride=1,
facecolors=rgb,
linewidth=0,
antialiased=False,
)
# Customize the z axis.
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter("%.02f"))
# Add a color bar which maps values to colors.
# fig.colorbar(surf, shrink=0.5, aspect=5)
ax.set_xlabel(basis + "_1")
ax.set_ylabel(basis + "_2")
ax.set_zlabel("U")
if save_show_or_return == "save":
s_kwargs = {"path": None, "prefix": 'show_landscape', "dpi": None,
"ext": 'pdf', "transparent": True, "close": True, "verbose": True}
s_kwargs = update_dict(s_kwargs, save_kwargs)
save_fig(**s_kwargs)
elif save_show_or_return == "show":
plt.tight_layout()
plt.show()
elif save_show_or_return == "return":
return ax | 3,580 |
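# Illustration (not part of the original source): a hedged sketch that plots a
# toy quasi-potential surface. The AnnData object is a placeholder whose `uns`
# lacks "grid_Pot_umap", so the explicitly passed grids are used; plotting also
# assumes a matplotlib version that still accepts fig.gca(projection="3d").
import numpy as np
import anndata

X, Y = np.meshgrid(np.linspace(-3, 3, 50), np.linspace(-3, 3, 50))
Z = X ** 2 + Y ** 2                                   # toy potential
adata = anndata.AnnData(np.zeros((1, 1), dtype=np.float32))
ax = show_landscape(adata, X, Y, Z, basis="umap", save_show_or_return="return")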
def _GetBuilderPlatforms(builders, waterfall):
"""Get a list of PerfBuilder objects for the given builders or waterfall.
Otherwise, just return all platforms.
"""
if builders:
return {b for b in bot_platforms.ALL_PLATFORMS if b.name in
builders}
elif waterfall == 'perf':
return bot_platforms.OFFICIAL_PLATFORMS
elif waterfall == 'perf-fyi':
return bot_platforms.FYI_PLATFORMS
else:
return bot_platforms.ALL_PLATFORMS | 3,581 |
def meeting_guide(context):
"""
    Display the ReactJS-driven Meeting Guide list.
"""
settings = get_meeting_guide_settings()
json_meeting_guide_settings = json_dumps(settings)
return {
"meeting_guide_settings": json_meeting_guide_settings,
"mapbox_key": settings["map"]["key"],
"timezone": settings["timezone"],
} | 3,582 |
def generate_input_fn(file_path, shuffle, batch_size, num_epochs):
"""Generates a data input function.
Args:
file_path: Path to the data.
shuffle: Boolean flag specifying if data should be shuffled.
batch_size: Number of records to be read at a time.
num_epochs: Number of times to go through all of the records.
Returns:
    A function used by `Estimator` to read data.
"""
def _input_fn():
"""Returns features and target from input data.
Defines the input dataset, specifies how to read the data, and reads it.
Returns:
        A tuple of a dictionary containing the features, and the target.
"""
num_threads = multiprocessing.cpu_count()
dataset = tf.data.TextLineDataset(filenames=[file_path])
dataset = dataset.skip(1)
dataset = dataset.map(lambda x: parse_csv(
tf.expand_dims(x, -1)), num_parallel_calls=num_threads)
dataset = dataset.map(get_features_target_tuple,
num_parallel_calls=num_threads)
if shuffle:
dataset = dataset.shuffle(SHUFFLE_BUFFER_SIZE)
dataset = dataset.batch(batch_size)
dataset = dataset.repeat(num_epochs)
dataset = dataset.prefetch(1)
iterator = dataset.make_one_shot_iterator()
features, target = iterator.get_next()
return features, target
return _input_fn | 3,583 |
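# Illustration (not part of the original source): a hedged usage sketch with the
# TF 1.x Estimator API; the CSV path is an assumption.
train_input_fn = generate_input_fn(
    file_path="data/train.csv",   # hypothetical CSV with a header row
    shuffle=True,
    batch_size=128,
    num_epochs=10,
)
# the returned closure is then handed to an Estimator, e.g.:
#   estimator.train(input_fn=train_input_fn)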
def amp_phase_to_complex(lookup_table):
"""
This constructs the function to convert from AMP8I_PHS8I format data to complex64 data.
Parameters
----------
lookup_table : numpy.ndarray
Returns
-------
callable
"""
_validate_lookup(lookup_table)
def converter(data):
if not isinstance(data, numpy.ndarray):
raise ValueError('requires a numpy.ndarray, got {}'.format(type(data)))
if data.dtype.name != 'uint8':
raise ValueError('requires a numpy.ndarray of uint8 dtype, got {}'.format(data.dtype.name))
        if len(data.shape) != 3:
            raise ValueError('Requires a three-dimensional numpy.ndarray (with band '
                             'in the last dimension), got shape {}'.format(data.shape))
        out = numpy.zeros((data.shape[0], data.shape[1], data.shape[2]//2), dtype=numpy.complex64)
amp = lookup_table[data[:, :, 0::2]]
theta = data[:, :, 1::2]*(2*numpy.pi/256)
out.real = amp*numpy.cos(theta)
out.imag = amp*numpy.sin(theta)
return out
return converter | 3,584 |
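# Illustration (not part of the original source): a minimal sketch, assuming
# `_validate_lookup` accepts a 256-entry float array used as the amplitude table.
import numpy

lut = numpy.arange(256, dtype=numpy.float64)      # identity amplitude lookup
converter = amp_phase_to_complex(lut)

raw = numpy.zeros((4, 4, 2), dtype=numpy.uint8)   # one band: amp/phase interleaved
raw[:, :, 0] = 100                                # amplitude index
raw[:, :, 1] = 64                                 # phase byte -> 64 * 2*pi/256 = pi/2
cplx = converter(raw)                             # complex64 array of shape (4, 4, 1)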
def combine_aqs_cmaq(model, obs):
"""Short summary.
Parameters
----------
model : type
Description of parameter `model`.
obs : type
Description of parameter `obs`.
Returns
-------
type
Description of returned object.
"""
g = obs.df.groupby('Species')
comparelist = sort(obs.df.Species.unique())
dfs = []
for i in comparelist:
if (i == 'OZONE'): # & ('O3' in model.keys):
print('Interpolating Ozone:')
df = g.get_group(i)
fac = epa_util.check_cmaq_units(df, param='O3', aqs_param=i)
print(fac)
cmaq = model.get_var(lay=0, param='O3').compute() * fac
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
# df.Obs, df.CMAQ = df.Obs, df.CMAQ
df.Units = 'PPB'
dfs.append(df)
elif i == 'PM2.5':
if ('PM25_TOT' in model.keys) | ('ASO4J' in model.keys):
print('Interpolating PM2.5:')
df = g.get_group(i)
fac = epa_util.check_cmaq_units(df, param='PM25', aqs_param=i)
cmaq = model.get_var(lay=0, param='PM25').compute() * fac
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'CO':
if 'CO' in model.keys:
print('Interpolating CO:')
df = g.get_group(i)
fac = epa_util.check_cmaq_units(df, param='CO', aqs_param=i)
cmaq = model.get_var(lay=0, param='CO').compute() * fac
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'NOY':
if 'NOY' in model.keys:
print('Interpolating NOY:')
df = g.get_group(i)
fac = epa_util.check_cmaq_units(df, param='NOY', aqs_param=i)
cmaq = model.get_var(lay=0, param='NOY').compute() * fac
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'SO2':
if 'SO2' in model.keys:
print('Interpolating SO2')
df = g.get_group(i)
fac = epa_util.check_cmaq_units(df, param='SO2', aqs_param=i)
cmaq = model.get_var(lay=0, param='SO2').compute() * fac
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'NOX':
if ('NO' in model.keys) | ('NO2' in model.keys):
print('Interpolating NOX:')
df = g.get_group(i)
fac = epa_util.check_cmaq_units(df, param='NOX', aqs_param=i)
cmaq = model.get_var(lay=0, param='NOX').compute() * fac
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'NO':
if ('NO' in model.keys):
print('Interpolating NO:')
df = g.get_group(i)
fac = epa_util.check_cmaq_units(df, param='NO', aqs_param=i)
cmaq = model.get_var(lay=0, param='NO').compute() * fac
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'NO2':
if ('NO2' in model.keys):
print('Interpolating NO2:')
df = g.get_group(i)
fac = epa_util.check_cmaq_units(df, param='NO2', aqs_param=i)
cmaq = model.get_var(lay=0, param='NO2').compute() * fac
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'SO4f':
if ('PM25_SO4' in model.keys) | ('ASO4J' in model.keys) | ('ASO4I' in model.keys):
print('Interpolating PSO4:')
df = g.get_group(i)
fac = epa_util.check_cmaq_units(df, param='SO4f', aqs_param=i)
cmaq = model.get_var(lay=0, param='SO4f').compute() * fac
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'PM10':
if ('PM_TOTAL' in model.keys) or ('ASO4K' in model.keys):
print('Interpolating PM10:')
df = g.get_group(i)
fac = epa_util.check_cmaq_units(df, param='PM10', aqs_param=i)
cmaq = model.get_var(lay=0, param='PM10').compute() * fac
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'NO3f':
if ('PM25_NO3' in model.keys) | ('ANO3J' in model.keys) | ('ANO3I' in model.keys):
print('Interpolating PNO3:')
df = g.get_group(i)
fac = epa_util.check_cmaq_units(df, param='NO3f', aqs_param=i)
cmaq = model.get_var(lay=0, param='NO3F').compute() * fac
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'ECf':
if ('PM25_EC' in model.keys) | ('AECI' in model.keys) | ('AECJ' in model.keys):
print('Interpolating PEC:')
df = g.get_group(i)
fac = epa_util.check_cmaq_units(df, param='ECf', aqs_param=i)
cmaq = model.get_var(lay=0, param='ECf').compute() * fac
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'OCf':
if ('APOCJ' in model.keys):
print('Interpolating OCf:')
df = g.get_group(i)
fac = epa_util.check_cmaq_units(df, param='OCf', improve_param=i)
cmaqvar = model.get_var(lay=0, param='OC').compute() * fac
df = interpo.interp_to_obs(cmaqvar, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'ETHANE':
if ('ETHA' in model.keys):
print('Interpolating Ethane:')
df = g.get_group(i)
fac = epa_util.check_cmaq_units(df, param='ETHA', aqs_param=i)
cmaq = model.get_var(lay=0, param='ETHA').compute() * fac
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'BENZENE':
if ('BENZENE' in model.keys):
print('Interpolating BENZENE:')
df = g.get_group(i)
fac = epa_util.check_cmaq_units(df, param='BENZENE', aqs_param=i)
cmaq = model.get_var(lay=0, param='BENZENE').compute() * fac
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'TOLUENE':
if ('TOL' in model.keys):
print('Interpolating Toluene:')
df = g.get_group(i)
fac = epa_util.check_cmaq_units(df, param='TOL', aqs_param=i)
cmaq = model.get_var(lay=0, param='TOL').compute() * fac
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'ISOPRENE':
if ('ISOP' in model.keys):
print('Interpolating Isoprene:')
df = g.get_group(i)
fac = epa_util.check_cmaq_units(df, param='ISOP', aqs_param=i)
cmaq = model.get_var(lay=0, param='ISOP').compute() * fac
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'O-XYLENE':
if ('XYL' in model.keys):
print('Interpolating Xylene')
df = g.get_group(i)
fac = epa_util.check_cmaq_units(df, param='XYL', aqs_param=i)
cmaq = model.get_var(lay=0, param='XYL').compute() * fac
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'WS':
if ('WSPD10' in model.keys):
print('Interpolating WS:')
df = g.get_group(i)
cmaq = model.get_var(lay=0, param='WSPD10')
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'TEMP':
if 'TEMP2' in model.keys:
print('Interpolating TEMP:')
df = g.get_group(i)
cmaq = model.get_var(lay=0, param='TEMP2')
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
elif i == 'WD':
if ('WDIR10' in model.keys):
print('Interpolating WD:')
df = g.get_group(i)
cmaq = model.get_var(lay=0, param='WDIR10')
df = interpo.interp_to_obs(cmaq, df, model.latitude.values, model.longitude.values,
radius=model.dset.XCELL)
dfs.append(df)
df = concat(dfs)
df.dropna(subset=['Obs', 'model'], inplace=True)
return df | 3,585 |
def bbknn_pca_matrix(
pca,
batch_list,
neighbors_within_batch=3,
n_pcs=50,
trim=None,
approx=True,
n_trees=10,
use_faiss=True,
metric="angular",
set_op_mix_ratio=1,
local_connectivity=1,
):
"""
Scanpy-independent BBKNN variant that runs on a PCA matrix and list of per-cell batch assignments instead of
an AnnData object. Non-data-entry arguments behave the same way as ``bbknn.bbknn()``.
Returns a ``(distances, connectivities, parameters)`` tuple, like what would have been stored in the AnnData object.
The connectivities are the actual neighbourhood graph.
Input
-----
pca : ``numpy.array``
PCA (or other dimensionality reduction) coordinates for each cell, with cells as rows.
batch_list : ``numpy.array`` or ``list``
A list of batch assignments for each cell.
"""
# more basic sanity checks/processing
# do we have the same number of cells in pca and batch_list?
if pca.shape[0] != len(batch_list):
raise ValueError(
"Different cell counts indicated by `pca.shape[0]` and `len(batch_list)`."
)
# convert batch_list to np.array of strings for ease of mask making later
batch_list = np.asarray([str(i) for i in batch_list])
# assert that all batches have at least neighbors_within_batch cells in there
unique, counts = np.unique(batch_list, return_counts=True)
if np.min(counts) < neighbors_within_batch:
raise ValueError(
"Not all batches have at least `neighbors_within_batch` cells in them."
)
# metric sanity checks (duplicating the ones in bbknn(), but without scanpy logging)
if approx and metric not in ["angular", "euclidean", "manhattan", "hamming"]:
print(
"unrecognised metric for type of neighbor calculation, switching to angular"
)
metric = "angular"
elif not approx and not (
metric == "euclidean"
or isinstance(metric, DistanceMetric)
or metric in KDTree.valid_metrics
):
print(
"unrecognised metric for type of neighbor calculation, switching to euclidean"
)
metric = "euclidean"
# obtain the batch balanced KNN graph
knn_distances, knn_indices = get_graph(
pca=pca,
batch_list=batch_list,
n_pcs=n_pcs,
n_trees=n_trees,
approx=approx,
metric=metric,
use_faiss=use_faiss,
neighbors_within_batch=neighbors_within_batch,
)
# sort the neighbours so that they're actually in order from closest to furthest
newidx = np.argsort(knn_distances, axis=1)
knn_indices = knn_indices[
np.arange(np.shape(knn_indices)[0])[:, np.newaxis], newidx
]
knn_distances = knn_distances[
np.arange(np.shape(knn_distances)[0])[:, np.newaxis], newidx
]
# this part of the processing is akin to scanpy.api.neighbors()
dist, cnts = compute_connectivities_umap(
knn_indices,
knn_distances,
knn_indices.shape[0],
knn_indices.shape[1],
set_op_mix_ratio=set_op_mix_ratio,
local_connectivity=local_connectivity,
)
# trimming. compute default range if absent
if trim is None:
trim = 10 * knn_distances.shape[1]
# skip trimming if set to 0, otherwise trim
if trim > 0:
cnts = trimming(cnts=cnts, trim=trim)
# create a collated parameters dictionary
# determine which neighbour computation was used, mirroring create_tree() logic
if approx:
computation = "annoy"
elif metric == "euclidean":
if "faiss" in sys.modules and use_faiss:
computation = "faiss"
else:
computation = "cKDTree"
else:
computation = "KDTree"
# we'll have a zero distance for our cell of origin, and nonzero for every other neighbour computed
params = {
"n_neighbors": len(dist[0, :].data) + 1,
"method": "umap",
"metric": metric,
"n_pcs": n_pcs,
"bbknn": {"trim": trim, "computation": computation},
}
return (dist, cnts, params) | 3,586 |
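# Illustration (not part of the original source): a hedged sketch on synthetic
# data; it assumes the module's get_graph/compute_connectivities_umap helpers
# are importable alongside bbknn_pca_matrix.
import numpy as np

rng = np.random.default_rng(0)
pca = rng.normal(size=(200, 50))                  # 200 cells, 50 PCs
batches = ["batch1"] * 100 + ["batch2"] * 100     # per-cell batch labels

dist, cnts, params = bbknn_pca_matrix(
    pca, batches, neighbors_within_batch=3, n_pcs=50, approx=False, metric="euclidean"
)
# `cnts` is the batch-balanced neighbourhood graph (sparse connectivities),
# `dist` the matching distances, `params` mirrors scanpy's .uns["neighbors"] entry.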
def autoinstall(ctx, **kwargs):
"""
install a system using an autofile template
"""
bg_flag = kwargs.pop('bg')
request = {'action_type': 'SUBMIT', 'job_type': 'autoinstall'}
for key in ('profile', 'template', 'verbosity'):
if kwargs[key] is None:
kwargs.pop(key)
if not kwargs['repos']:
kwargs.pop('repos')
request['parameters'] = json.dumps(kwargs)
client = Client()
job_id = wait_scheduler(client, request)
# bg flag: do not wait for output, just return to prompt
if bg_flag:
return
try:
wait_job_exec(client, job_id)
ctx.invoke(job_output, job_id=job_id)
except KeyboardInterrupt:
cancel_job = click.confirm('\nDo you want to cancel the job?')
if not cancel_job:
click.echo('warning: job is still running, remember to cancel it '
'if you want to submit a new action for this system')
raise
ctx.invoke(job_cancel, job_id=job_id) | 3,587 |
async def test_automatic_failover_after_leader_issue(ops_test: OpsTest) -> None:
"""Tests that an automatic failover is triggered after an issue happens in the leader."""
# Find the current primary unit.
primary = await get_primary(ops_test)
# Crash PostgreSQL by removing the data directory.
await ops_test.model.units.get(primary).run(f"rm -rf {STORAGE_PATH}/pgdata")
# Wait for charm to stabilise
await ops_test.model.wait_for_idle(
apps=[APP_NAME], status="active", timeout=1000, wait_for_exact_units=3
)
# Primary doesn't have to be different, but it does have to exist.
assert await get_primary(ops_test) != "None" | 3,588 |
def test_write_rows(tmpdir):
"""Test writing rows to a CSV files."""
filename = os.path.join(tmpdir, 'out.csv')
consumer = Write(file=CSVFile(filename, header=['A', 'B', 'C']))\
.open(['A', 'B', 'C'])
consumer.consume(3, [1, 2, 3])
consumer.consume(2, [4, 5, 6])
consumer.consume(1, [7, 8, 9])
consumer.close()
data = list()
with open(filename, 'r') as f:
for line in f:
data.append(line.strip())
assert data == [
'A,B,C',
'1,2,3',
'4,5,6',
'7,8,9'
] | 3,589 |
def _sanitize_anndata(adata: AnnData) -> None:
"""Sanitization and sanity checks on IR-anndata object.
Should be executed by every read_xxx function"""
assert (
len(adata.X.shape) == 2
), "X needs to have dimensions, otherwise concat doesn't work. "
# Pending updates to anndata to properly handle boolean columns.
# For now, let's turn them into a categorical with "True/False"
BOOLEAN_COLS = ("has_ir", "is_cell", "multi_chain", "high_confidence", "productive")
# explicitly convert those to categoricals. All IR_ columns that are strings
# will be converted to categoricals, too
CATEGORICAL_COLS = ("extra_chains",)
# Sanitize has_ir column into categorical
# This should always be a categorical with True / False
for col in adata.obs.columns:
if col.endswith(BOOLEAN_COLS):
adata.obs[col] = pd.Categorical(
[
"True" if _is_true2(x) else "False" if _is_false2(x) else "None"
for x in adata.obs[col]
],
categories=["True", "False", "None"],
)
elif col.endswith(CATEGORICAL_COLS) or (
col.startswith("IR_") and is_object_dtype(adata.obs[col])
):
# Turn all IR_VJ columns that are of type string or object to categoricals
# otherwise saving anndata doesn't work.
adata.obs[col] = pd.Categorical(adata.obs[col])
adata.strings_to_categoricals() | 3,590 |
def read_1d_spikes(filename):
"""Reads one dimensional binary spike file and returns a td_event event.
The binary file is encoded as follows:
* Each spike event is represented by a 40 bit number.
* First 16 bits (bits 39-24) represent the neuronID.
* Bit 23 represents the sign of spike event: 0=>OFF event, 1=>ON event.
* the last 23 bits (bits 22-0) represent the spike event timestamp in
microseconds.
Parameters
----------
filename : str
name of spike file.
Returns
-------
Event
spike event.
Examples
--------
>>> td_event = read_1d_spikes(file_path)
"""
with open(filename, 'rb') as input_file:
input_byte_array = input_file.read()
input_as_int = np.asarray([x for x in input_byte_array])
x_event = (input_as_int[0::5] << 8) | input_as_int[1::5]
c_event = input_as_int[2::5] >> 7
t_event = (
(input_as_int[2::5] << 16)
| (input_as_int[3::5] << 8)
| (input_as_int[4::5])
) & 0x7FFFFF
# convert spike times to ms
return Event(x_event, None, c_event, t_event / 1000) | 3,591 |
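# Illustration (not part of the original source): decoding one packed 40-bit
# event by hand, mirroring the bit layout described in the docstring.
import numpy as np

packed = np.array([0x00, 0x2A, 0x80, 0x01, 0xF4], dtype=np.uint8)

neuron_id = (int(packed[0]) << 8) | int(packed[1])                  # bits 39-24 -> 42
polarity = int(packed[2]) >> 7                                      # bit 23 -> 1 (ON event)
timestamp_us = ((int(packed[2]) << 16)
                | (int(packed[3]) << 8)
                | int(packed[4])) & 0x7FFFFF                        # bits 22-0 -> 500 us
assert (neuron_id, polarity, timestamp_us) == (42, 1, 500)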
def _parse_step_log(lines):
"""Parse the syslog from the ``hadoop jar`` command.
Returns a dictionary which potentially contains the following keys:
application_id: a string like 'application_1449857544442_0002'. Only
set on YARN
counters: a map from counter group -> counter -> amount, or None if
no counters found (only YARN prints counters)
errors: a list of errors, with the following keys:
hadoop_error:
message: lines of error, as as string
start_line: first line of log containing the error (0-indexed)
num_lines: # of lines of log containing the error
attempt_id: ID of task attempt with this error
job_id: a string like 'job_201512112247_0003'. Should always be set
output_dir: a URI like 'hdfs:///user/hadoop/tmp/my-output-dir'. Should
always be set on success.
"""
return _parse_step_log_from_log4j_records(
_parse_hadoop_log4j_records(lines)) | 3,592 |
def is_regex(regex, invert=False):
"""Test that value matches the given regex.
The regular expression is searched against the value, so a match
in the middle of the value will succeed. To specifically match
the beginning or the whole regex, use anchor characters. If
invert is true, then matching the regex will cause the test to
fail.
"""
# pylint: disable=unused-argument # args defined by test definition
rex = re.compile(regex)
def is_regex_test(conf, path, value):
match = rex.search(value)
if invert and match:
return u'"{0}" matches /{1}/'.format(value, regex)
if not invert and not match:
return u'"{0}" does not match /{1}/'.format(value, regex)
return None
return is_regex_test | 3,593 |
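# Illustration (not part of the original source): the factory returns a test
# callable taking (conf, path, value); `None` means the value passed the check.
starts_with_foo = is_regex(r"^foo")
assert starts_with_foo(None, "some.path", "foobar") is None
print(starts_with_foo(None, "some.path", "barfoo"))   # '"barfoo" does not match /^foo/'

not_foo = is_regex(r"^foo", invert=True)
print(not_foo(None, "some.path", "foobar"))           # '"foobar" matches /^foo/'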
def get_obj(obj):
"""Opens the url of `app_obj`, builds the object from the page and
returns it.
"""
open_obj(obj)
return internal_ui_operations.build_obj(obj) | 3,594 |
def process(frame):
"""Process initial frame and tag recognized objects."""
# 1. Convert initial frame to grayscale
grayframe = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# For every model:
for model, color, parameters in (
(MODEL_FACE, (255, 255, 0), {'scaleFactor': 1.1, 'minNeighbors': 5, 'minSize': (30, 30)}),
(MODEL_EYE, (0, 0, 255), {'scaleFactor': 1.1, 'minNeighbors': 5, 'minSize': (20, 20)}),
*((model, (0, 255, 0), {'scaleFactor': 1.1, 'minNeighbors': 5, 'minSize': (20, 20)}) for model in MODELS_PLATE),
):
# 2. Apply model, recognize objects
objects = model.detectMultiScale(grayframe, **parameters)
# 3. For every recognized object, draw a rectangle around it
for (x, y, w, h) in objects:
cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2) # BGR
# 4. Return initial color frame with rectangles
return frame | 3,595 |
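# Illustration (not part of the original source): one plausible setup for the
# MODEL_FACE / MODEL_EYE / MODELS_PLATE globals using OpenCV's bundled Haar
# cascades, plus a webcam loop that feeds frames through process(). The cascade
# file names are assumptions.
import cv2

MODEL_FACE = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
MODEL_EYE = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_eye.xml")
MODELS_PLATE = [cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_russian_plate_number.xml")]

cap = cv2.VideoCapture(0)                  # default webcam
while True:
    ok, frame = cap.read()
    if not ok:
        break
    cv2.imshow("tagged", process(frame))   # rectangles drawn around detections
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
cap.release()
cv2.destroyAllWindows()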
def tzoffset():
"""UTC to America/New_York offset."""
return datetime.timedelta(hours=5) | 3,596 |
def ssh(instance, command, plain=None, extra=None, command_args=None):
"""Run ssh command.
Parameters:
instance(MechInstance): a mech instance
command(str): command to execute (ex: 'chmod +x /tmp/file')
plain(bool): use user/pass auth
extra(str): arguments to pass to ssh
command_args(str): arguments for command
Returns:
return_code(int): 0=success
stdout(str): Output from the command
stderr(str): Error from the command
Note: May not really need the tempfile if self.use_psk==True.
    Using the tempfile, there are options to not add the host to the known_hosts
    file, which is convenient but could allow MITM attacks. That is unlikely
    locally, but it could still be an issue.
"""
LOGGER.debug('command:%s plain:%s extra:%s command_args:%s',
command, plain, extra, command_args)
if instance.created:
state = instance.get_vm_state()
if vm_ready_based_on_state(state):
config_ssh = instance.config_ssh()
temp_file = tempfile.NamedTemporaryFile(delete=False)
try:
temp_file.write(config_ssh_string(config_ssh).encode('utf-8'))
temp_file.close()
cmds = ['ssh']
if not plain:
cmds.extend(('-F', temp_file.name))
if not plain:
cmds.append(config_ssh['Host'])
if extra:
cmds.append(extra)
if command:
cmds.extend(('--', command))
if command_args:
cmds.append(command_args)
LOGGER.debug('cmds:%s', cmds)
# if running a script
if command:
result = subprocess.run(cmds, capture_output=True)
stdout = result.stdout.decode('utf-8').strip()
stderr = result.stderr.decode('utf-8').strip()
return result.returncode, stdout, stderr
else:
# interactive
return subprocess.call(cmds), None, None
finally:
os.unlink(temp_file.name)
else:
return 1, '', 'VM not ready({})'.format(state) | 3,597 |
def KICmag(koi,band):
"""
    Returns the apparent magnitude of the given KOI star in the given band; equivalent to KICmags(koi)[band].
"""
return KICmags(koi)[band] | 3,598 |
def list_to_decimal(nums: List[int]) -> int:
"""Accept a list of positive integers in the range(0, 10)
and return a integer where each int of the given list represents
decimal place values from first element to last. E.g
[1,7,5] => 175
[0,3,1,2] => 312
Place values are 10**n where n represents the digit position
Eg to calculate 1345, we have 5 1's, 4 10's, 3 100's and 1 1000's
1, 3 , 4 , 5
1000's, 100's, 10's, 1's
"""
for num in nums:
if isinstance(num, bool) or not isinstance(num, int):
raise TypeError
elif not num in range(0, 10):
raise ValueError
return int("".join(map(str, nums))) | 3,599 |
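# Illustration (not part of the original source): the same place-value idea
# written as explicit arithmetic (input validation omitted for brevity).
def list_to_decimal_places(nums):
    """Sum digit * 10**position, positions counted from the right."""
    return sum(digit * 10 ** position
               for position, digit in enumerate(reversed(nums)))

assert list_to_decimal_places([1, 3, 4, 5]) == 1345   # 1*1000 + 3*100 + 4*10 + 5*1
assert list_to_decimal_places([0, 3, 1, 2]) == 312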