content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
---|---|
def _daily_prevalence(data):
"""
Returns a series where each value is the true fraction of the population currently infected.
Args:
data (dict): tracker data loaded from a pkl file.
Returns:
(np.array): 1D array where each value is the fraction described above.
"""
n_infected_per_day = data['ei_per_day']
n_people = data['n_humans']
prevalence = np.array(n_infected_per_day) / n_people
return prevalence | 2,500 |
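A minimal usage sketch with the function above in scope; the tracker keys `ei_per_day` and `n_humans` are taken from the function body, and the values below are made up for illustration:

# Hypothetical tracker data: three recorded days, a population of 100 humans.
data = {'ei_per_day': [1, 2, 4], 'n_humans': 100}
print(_daily_prevalence(data))  # -> array([0.01, 0.02, 0.04])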
def _pytest_deselected(items):
"""
pytest has a hookspec pytest_deselected for accessing the deselected tests.
Example: add this code to conftest.py in your test root dir
Running the tests now will give you an output similar to this:
$ pytest -vv
...
plugins: cov-2.8.1, asyncio-0.10.0
collecting ...
deselected: test_spam.py::test_spam
deselected: test_spam.py::test_bacon
deselected: test_spam.py::test_ham
collected 4 items / 3 deselected / 1 selected
"""
if not items:
return
config = items[0].session.config
reporter = config.pluginmanager.getplugin("terminalreporter")
reporter.ensure_newline()
for item in items:
reporter.line(f"deselected: {item.nodeid}", yellow=True, bold=True) | 2,501 |
def open_serial_ports(serial_ports):
""" Open all the serial ports in the list. Used when the GUI is first opened. """
global OPEN_SERIAL_PORTS
try:
for s in serial_ports:
OPEN_SERIAL_PORTS.append(serial.Serial(s, SERIAL_SPEED, write_timeout=0, timeout=0))
except (OSError, serial.SerialException) as e:
if USE_CONFIG_JSON:
print("\nNot all serial ports were detected. Check config.json for accuracy.\n\n%s" % e)
raise Exception(e) | 2,502 |
def test_memoryfile_incr_init(data_coutwildrnp_json):
"""In-memory GeoJSON file written in 2 parts can be read"""
with MemoryFile() as memfile:
memfile.write(data_coutwildrnp_json[:1000])
memfile.write(data_coutwildrnp_json[1000:])
with memfile.open() as collection:
assert len(collection) == 67 | 2,503 |
def users_view(page):
"""
The user view page
Returns:
a rendered user view template
"""
user_search = request.args.get("search")
user_role = request.args.get("user_role")
users_query = model.User.query
if user_search:
term = "%" + user_search + "%"
users_query = users_query.filter(
or_(model.User.name.ilike(term), model.User.username.ilike(term))
)
if user_role and user_role != "all":
users_query = users_query.join(model.User.user_roles).filter(
model.UserRole.name == user_role
)
users_pagination = util.paginate(users_query, page, 30)
users = users_pagination.items
metrics = {}
for user in users:
user_metrics = {}
run_query = model.Run.query.filter_by(user_id=user.id)
user_metrics["num_runs"] = run_query.count()
user_metrics["last_run"] = run_query.order_by(
model.Run.submit_time.desc()
).limit(
1
).first()
metrics[user.id] = user_metrics
return render_template(
"users/view.html",
users_pagination=users_pagination,
users=users,
metrics=metrics,
user_role=user_role,
search=user_search,
) | 2,504 |
def serial_christie_power_state(connection):
"""Ask a Christie projector for its power state and parse the response"""
connection.reset_input_buffer()
response = serial_send_command(connection, "(PWR?)", char_to_read=21)
result = None
if len(response) > 0:
if "PWR!001" in response:
result = "on"
if "PWR!000" in response:
result = "off"
if "PWR!010" in response:
result = "powering_off"
if "PWR!011" in response:
result = "powering_on"
return result | 2,505 |
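The same parsing can be expressed with a lookup table, shown here only as a sketch; the `PWR!xxx` codes are the ones assumed by the function above, not an authoritative list for the Christie serial protocol:

# Response codes copied from the conditionals above.
PWR_STATES = {
    "PWR!001": "on",
    "PWR!000": "off",
    "PWR!010": "powering_off",
    "PWR!011": "powering_on",
}

def parse_christie_power_response(response):
    """Return the first matching state, or None when no known code is present."""
    for code, state in PWR_STATES.items():
        if code in response:
            return state
    return None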
def printHeader(args, header, sanity):
"""
printHeader(header): Print up our headers
"""
text = header["header_text"]
print("Header")
print("======")
if args.text:
print("")
print(" Request ID: %s" % header["request_id"])
print(" Questions: %d" % int(header["num_questions"]))
print(" Answers: %d" % int(header["num_answers"]))
print(" Authority records: %d" % (int(header["num_authority_records"])))
print(" Additional records: %d" % (int(header["num_additional_records"])))
print(" QR: %s" % text["qr"])
print(" AA: %s" % text["aa"])
print(" TC: %s" % text["tc"])
print(" RD: %s" % text["rd"])
print(" RA: %s" % text["ra"])
print(" OPCODE: %d - %s" % (header["header"]["opcode"], text["opcode_text"]))
print(" RCODE: %d - %s" % (header["header"]["rcode"], text["rcode_text"]))
#
# Print a graph right out of RFC 1035, section 4.1.1
#
if args.graph:
print("")
print(" 1 1 1 1 1 1")
print(" 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5")
print(" +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+")
print(" | Request ID: %s |" % header["request_id"])
print(" +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+")
print(" |%s| Opcode: %d |%s|%s|%s|%s| Z: %d | RCODE: %d |" % (
"QR" if header["header"]["qr"] else " ",
header["header"]["opcode"],
"AA" if header["header"]["aa"] else " ",
"TC" if header["header"]["tc"] else " ",
"RD" if header["header"]["rd"] else " ",
"RA" if header["header"]["ra"] else " ",
header["header"]["z"],
header["header"]["rcode"],
))
print(" +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+")
print(" | Question Count: %d |" % int(header["num_questions"]))
print(" +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+")
print(" | Answer Count: %d |" % int(header["num_answers"]))
print(" +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+")
print(" | Authority/Nameserver Count: %d |" % int(header["num_authority_records"]))
print(" +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+")
print(" | Additional Records Count: %d |" % int(header["num_additional_records"]))
print(" +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+")
if len(sanity):
for warning in sanity:
print(" WARNING: %s" % warning) | 2,506 |
def cuda_tanh(a):
""" Hyperbolic tangent of GPUArray elements.
Parameters:
a (gpu): GPUArray with elements to be operated on.
Returns:
gpu: tanh(GPUArray)
Examples:
>>> a = cuda_tanh(cuda_give([0, pi / 4]))
>>> a
array([ 0., 0.6557942])
>>> type(a)
<class 'pycuda.gpuarray.GPUArray'>
"""
return pycuda.cumath.tanh(a) | 2,507 |
def normalize_multi_header(df):
"""将有MultiIndex的column字符串做标准化处理,去掉两边空格等"""
df_copy = df.copy()
df_copy_columns = [ tuple(y.strip().lower() for y in x) for x in df_copy.columns ]
df_copy.columns = pd.core.index.MultiIndex.from_tuples(df_copy_columns)
return df_copy | 2,508 |
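A small usage sketch with the function above in scope:

import pandas as pd

df = pd.DataFrame(
    [[1, 2]],
    columns=pd.MultiIndex.from_tuples([(" Price ", " Open"), ("Price", "Close ")]),
)
print(normalize_multi_header(df).columns.tolist())
# -> [('price', 'open'), ('price', 'close')]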
def scapy_packet_Packet_hasflag(self, field_name, value):
"""Is the specified flag value set in the named field"""
field, val = self.getfield_and_val(field_name)
if isinstance(field, EnumField):
if val not in field.i2s:
return False
return field.i2s[val] == value
else:
return (1 << field.names.index([value])) & self.__getattr__(field_name) != 0 | 2,509 |
def grid_points_2d(length, width, div, width_div=None):
"""Returns a regularly spaced grid of points occupying a rectangular
region of length x width partitioned into div intervals. If different
spacing is desired in width, then width_div can be specified, otherwise
it will default to div. If div < 2 in either x or y, then the corresponding
coordinate will be set to length or width respectively."""
if div > 1:
px = [-length / 2.0 + (x / (div - 1)) * length for x in range(div)]
else:
px = [length]
if width_div is not None:
wd = width_div
else:
wd = div
if wd > 1:
py = [-width / 2.0 + (y / (wd - 1)) * width for y in range(wd)]
else:
py = [width]
pts = []
for x in px:
for y in py:
pts.append((x, y))
return pts | 2,510 |
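A quick usage sketch with the function above in scope, for a 3 x 3 grid over a 2.0 x 1.0 rectangle centred on the origin:

pts = grid_points_2d(2.0, 1.0, 3)
print(pts)
# [(-1.0, -0.5), (-1.0, 0.0), (-1.0, 0.5),
#  (0.0, -0.5), (0.0, 0.0), (0.0, 0.5),
#  (1.0, -0.5), (1.0, 0.0), (1.0, 0.5)]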
def analytic_gradient(circuit, parameter=None):
"""Return the analytic gradient of the input circuit."""
if parameter is not None:
if parameter not in circuit.parameters:
raise ValueError('Parameter not in this circuit.')
if len(circuit._parameter_table[parameter]) > 1:
raise NotImplementedError('No product rule support yet, params must be unique.')
summands, op_context = [], []
for i, op in enumerate(circuit.data):
gate = op[0]
op_context += [op[1:]]
if (parameter is None and len(gate.params) > 0) or parameter in gate.params:
summands += [gradient_lookup(gate)]
else:
summands += [[[1, gate]]]
gradient = []
for product_rule_term in itertools.product(*summands):
summand_circuit = QuantumCircuit(*circuit.qregs)
coeff = 1
for i, a in enumerate(product_rule_term):
coeff *= a[0]
summand_circuit.data.append([a[1], *op_context[i]])
gradient += [[coeff, summand_circuit.copy()]]
return gradient | 2,511 |
def _get_shipping_voucher_discount_for_cart(voucher, cart):
"""Calculate discount value for a voucher of shipping type."""
if not cart.is_shipping_required():
msg = pgettext(
'Voucher not applicable',
'Your order does not require shipping.')
raise NotApplicable(msg)
shipping_method = cart.shipping_method
if not shipping_method:
msg = pgettext(
'Voucher not applicable',
'Please select a shipping method first.')
raise NotApplicable(msg)
# check if voucher is limited to specified countries
shipping_country = cart.shipping_address.country
if voucher.countries and shipping_country.code not in voucher.countries:
msg = pgettext(
'Voucher not applicable',
'This offer is not valid in your country.')
raise NotApplicable(msg)
return get_shipping_voucher_discount(
voucher, cart.get_subtotal(), shipping_method.get_total()) | 2,512 |
async def test_async_delete_authors(aresponses, readarr_client: ReadarrClient) -> None:
"""Test editing authors."""
aresponses.add(
"127.0.0.1:8787",
f"/api/{READARR_API}/author/0",
"DELETE",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
),
match_querystring=True,
)
await readarr_client.async_delete_authors(0)
aresponses.add(
"127.0.0.1:8787",
f"/api/{READARR_API}/author/editor",
"DELETE",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
),
match_querystring=True,
)
await readarr_client.async_delete_authors([0, 1]) | 2,513 |
def ottawa(location, **kwargs):
"""Ottawa Provider
:param location: Your search location you want geocoded.
"""
return get(location, provider='ottawa', **kwargs) | 2,514 |
def entries():
""" Basic data for a test case """
return copy.deepcopy(
{"arb_key": "text", "randn": random.randint(0, 10),
"nested": {"ntop": 0, "nmid": {"list": ["a", "b"]},
"lowest": {"x": {"a": -1, "b": 1}}},
"collection": {1, 2, 3}}) | 2,515 |
def count_ref_alleles(variant, *traits):
"""Count reference allels for a variant
Parameters
----------
variant : a Variant as from funcgenom
the variant for which alleles should be counted
*traits : str
the traits for which alleles should be counted
Returns
-------
int
the reference allele count
"""
return (
''.join(variant.traits[trait]['alleles'] for trait in traits)
.replace(',', '.')
.count('.')
) | 2,516 |
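The counting trick is purely string based: the allele strings of the requested traits are concatenated, ',' is normalised to '.', and the '.' occurrences are counted. A toy illustration with a hypothetical stand-in object (the real funcgenom layout is assumed, with '.' and ',' marking reference alleles):

class FakeVariant:
    # Hypothetical stand-in for a funcgenom Variant, for illustration only.
    traits = {
        'height': {'alleles': './A'},   # one reference allele
        'bmi': {'alleles': ',/,'},      # two reference alleles, ',' form
    }

print(count_ref_alleles(FakeVariant(), 'height', 'bmi'))  # -> 3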
def preprocess_image(image, image_sz=48):
"""
Preprocess an image. Most of this is stuff that needs to be done for the Keras CNN model to work,
as recommended by: https://chsasank.github.io/keras-tutorial.html
"""
# we need to convert to hue, saturation, and value (HSV) coordinates
hsv_image = color.rgb2hsv(image)
hsv_image[:, :, 2] = exposure.equalize_hist(hsv_image[:, :, 2])
image = color.hsv2rgb(hsv_image)
# we have to crop to central square
min_side = min(image.shape[:-1])
centre = image.shape[0] // 2, image.shape[1] // 2
image = image[centre[0] - min_side // 2:centre[0] + min_side // 2, centre[1] - min_side // 2:centre[1] + min_side // 2, :]
# our model _needs_ images that are all the same size
image = transform.resize(image, (image_sz, image_sz))
# change colour axis
image = np.rollaxis(image, -1)
return image | 2,517 |
def pspace_independent(a, b):
"""
Tests for independence between a and b by checking if their PSpaces have
overlapping symbols. This is a sufficient but not necessary condition for
independence and is intended to be used internally.
Notes
=====
pspace_independent(a, b) implies independent(a, b)
independent(a, b) does not imply pspace_independent(a, b)
"""
a_symbols = set(pspace(a).symbols)
b_symbols = set(pspace(b).symbols)
if len(set(random_symbols(a)).intersection(random_symbols(b))) != 0:
return False
if len(a_symbols.intersection(b_symbols)) == 0:
return True
return None | 2,518 |
def img_aspect_ratio(width, height):
"""
Returns an image's aspect ratio.
If the image has a common aspect ratio, returns the aspect ratio in the format x:y,
otherwise, just returns width/height.
"""
ratio = round(width/height, 2)
for ar, val in COMMON_ASPECT_RATIOS.items():
if ratio <= val + 0.01 and ratio >= val - 0.01:
ratio = ar
break
return ratio | 2,519 |
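A usage sketch; `COMMON_ASPECT_RATIOS` is defined elsewhere in the module, so the mapping below is only an assumed example of its shape (label -> width/height value):

COMMON_ASPECT_RATIOS = {'16:9': 1.78, '4:3': 1.33, '1:1': 1.0}  # assumed shape

print(img_aspect_ratio(1920, 1080))  # -> '16:9' (1920/1080 rounds to 1.78)
print(img_aspect_ratio(700, 300))    # -> 2.33 (no common ratio within +/- 0.01)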
def _get_active_sculpting_mesh_for_deformer(deformer):
"""
If sculpting is enabled on the deformer, return the output mesh. Otherwise,
return None.
"""
# If sculpting is enabled, .tweak[0] will be connected to the .tweakLocation of
# a mesh.
connections = cmds.listConnections('%s.tweak[0]' % deformer, d=True, s=False) or []
if len(connections) == 0:
return None
if len(connections) > 1:
# This isn't expected.
raise RuntimeError('More than one mesh points to %s.tweak[0]' % deformer)
return connections[0] | 2,520 |
def get_client_versions():
"""Gets the client versions (or client equivalent for server).
Returns:
A list of client versions (or client equivalent for server).
E.g. '10' for Windows 10 and Windows Server 2016.
"""
version_number = get_os_version_number()
if version_number in _WIN32_CLIENT_NAMES:
return [_WIN32_CLIENT_NAMES[version_number]]
return [] | 2,521 |
def PV_property(name,default_value=nan):
"""EPICS Channel Access Process Variable as class property"""
def prefix(self):
prefix = ""
if hasattr(self,"prefix"): prefix = self.prefix
if hasattr(self,"__prefix__"): prefix = self.__prefix__
if prefix and not prefix.endswith("."): prefix += "."
return prefix
def get(self):
from CA import caget
value = caget(prefix(self)+name.upper())
if value is None: value = default_value
if type(value) != type(default_value):
if type(default_value) == list: value = [value]
else:
try: value = type(default_value)(value)
except: value = default_value
return value
def set(self,value):
from CA import caput
value = caput(prefix(self)+name.upper(),value)
return property(get,set) | 2,522 |
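A sketch of how this property factory might be used, assuming a `CA` module providing `caget`/`caput` (as imported inside the getter and setter) and an EPICS record named `MOTOR1.SPEED`; the class and PV names are purely illustrative:

class Motor(object):
    # PV names resolve to prefix + '.' + NAME (a trailing '.' is appended automatically).
    prefix = 'MOTOR1'
    speed = PV_property('speed', default_value=0.0)

motor = Motor()
motor.speed = 2.5    # performs caput('MOTOR1.SPEED', 2.5)
print(motor.speed)   # performs caget('MOTOR1.SPEED'), falling back to 0.0 on failure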
def _stream_files(curr_header, fn, mesos_files):
"""Apply `fn` in parallel to each file in `mesos_files`. `fn` must
return a list of strings, and these strings are then printed
serially as separate lines.
`curr_header` is the most recently printed header. It's used to
group lines. Each line has an associated header (e.g. a string
representation of the MesosFile it was read from), and we only
print the header before printing a line with a different header
than the previous line. This effectively groups lines together
when they have the same header.
:param curr_header: Most recently printed header
:type curr_header: str
:param fn: function that reads a sequence of lines from a MesosFile
:type fn: MesosFile -> [str]
:param mesos_files: files to read
:type mesos_files: [MesosFile]
:returns: Returns the most recently printed header, and a list of
files that are still reachable. Once we detect a file is
unreachable, we stop trying to read from it.
:rtype: (str, [MesosFile])
"""
reachable_files = list(mesos_files)
# TODO switch to map
for job, mesos_file in util.stream(fn, mesos_files):
try:
lines = job.result()
except DCOSException as e:
# The read function might throw an exception if read.json
# is unavailable, or if the file doesn't exist in the
# sandbox. In any case, we silently remove the file and
# continue.
logger.exception("Error reading file: {}".format(e))
reachable_files.remove(mesos_file)
continue
if lines:
curr_header = _output(curr_header,
len(reachable_files) > 1,
six.text_type(mesos_file),
lines)
return curr_header, reachable_files | 2,523 |
def supported_platform(logger):
"""Checks if this script is running on supported platform.
Args:
logger: A valid logger instance to log debug/error messages.
Returns:
True if this platform is supported.
"""
# TODO(billy): Look into supporting Windows in the near future.
logger.debug("Current platform: {}".format(sys.platform))
if not (sys.platform == "linux" or sys.platform == "darwin"):
logger.error("Sorry, your OS is currently unsupported for this script.")
return False
if not (sys.version_info.major == 3 and sys.version_info.minor >= 5):
logger.error("This script requires Python 3.5 or higher!")
logger.error("You are using Python {}.{}.".format(sys.version_info.major,
sys.version_info.minor))
return False
return True | 2,524 |
def random_in_range(a: int, b: int) -> int:
""" Return a random number r with a <= r <= b. """
return random.randint(a, b) | 2,525 |
def count_items():
"""
:returns: a dictionary with counts in fields 'total', 'done'.
"""
con = sqlite3.connect(PROGRESS_DB_FILE_NAME)
cur = con.cursor()
# do not count root
cur.execute("SELECT COUNT(*) FROM item WHERE pk<>0")
total = cur.fetchone()[0]
cur.execute("SELECT COUNT(*) FROM item WHERE is_done='TRUE' AND pk<>0")
done = cur.fetchone()[0]
done_items = load_items(is_done=True)
done_today = 0
done_yesterday = 0
for i in done_items:
date_item = datetime.strptime(i.done_at, DATE_FORMAT)
date_now = datetime.now()
if date_now.date() == date_item.date():
done_today += 1
if (date_now.date() - date_item.date()).days == 1:
done_yesterday += 1
return {
'done': done,
'total': total,
'done_today': done_today,
'done_yesterday': done_yesterday,
} | 2,526 |
def app() -> None:
"""This app renders the Data Analyzer page"""
# TEXT:
st.write(
"""
# Data Analysis Dashboard
Please provide an asset name to display historical data.
"""
)
# INPUTs:
st.sidebar.title("Parameters")
col1, col2, col3 = st.columns(3)
with col1:
asset_class = st.selectbox(
label="Choose an asset class", options=AssetClasses.list()
)
# asset_ticker = st.text_input(label="Enter an asset ticker", value="AAPL")
# PROCESSING:
if asset_class == AssetClasses.STOCKS.value:
@st.cache(persist=True)
def get_global_stocks(hundred_results: int = 10) -> pd.DataFrame:
"""Get company name, ticker and country of top companies based on market cap.
By default returns 1000 biggest companies, max is 5800.
"""
return scrape_largest_companies(num_pages=hundred_results)
number_of_stocks = st.sidebar.number_input(
label="Number of stocks", min_value=100, max_value=5800, value=1000
)
period = st.sidebar.selectbox(
label="Period",
options=[
"max",
"ytd",
"10y",
"5y",
"2y",
"1y",
"1d",
"5d",
"1mo",
"3mo",
"6mo",
],
)
interval = st.sidebar.selectbox(
label="Interval",
options=[
"1d",
"1h",
"5d",
"1wk",
"1mo",
"3mo",
],
)
with st.spinner("Getting companies..."):
companies_df = get_global_stocks(
hundred_results=int(np.ceil(number_of_stocks / 100))
)
with col2:
country = st.selectbox(
label="Choose a country", options=companies_df["country"].unique()
)
with col3:
stock_name = st.selectbox(
label="Choose a stock",
options=companies_df.loc[companies_df["country"] == str(country)][
"name"
],
)
stock = Stock(
name=str(stock_name),
ticker=companies_df.loc[companies_df["name"] == str(stock_name)][
"ticker"
].iloc[0],
country=companies_df.loc[companies_df["name"] == str(stock_name)][
"country"
].iloc[0],
)
@st.cache(persist=True, allow_output_mutation=True)
def get_prices(
stock: Stock, period: str = "max", interval: str = "1d"
) -> pd.DataFrame:
"""Get prices from Yahoo Finance"""
return yf.Ticker(ticker=stock.ticker).history(
period=period, interval=interval
)
with st.spinner("Getting prices..."):
stock.prices = get_prices(stock=stock, period=period, interval=interval)
@st.cache(persist=True, allow_output_mutation=True)
def get_info(stock: Stock) -> dict:
return yf.Ticker(ticker=stock.ticker).info
@st.cache(persist=True, allow_output_mutation=True)
def get_news(stock: Stock) -> dict:
return yf.Ticker(ticker=stock.ticker).news
else:
raise NotImplementedError("Not implemented yet.")
# OUTPUT:
with st.spinner("Getting company info..."):
info = get_info(stock=stock)
st.write(
"""
## Business Summary
"""
)
with st.expander("See business description"):
st.write(info["longBusinessSummary"])
col_1, col_2, col_3 = st.columns(3)
with col_1:
st.write("**Ticker**: ", stock.ticker)
st.write("**Website**: ", info["website"])
with col_2:
st.write("**Sector**: ", info["sector"])
st.write("**Industry**: ", info["industry"])
with col_3:
st.write(
"**Number of shares**: ",
str(round(info["sharesOutstanding"] / 1e6, 2)),
"milions",
)
st.write("**Market beta**: ", str(round(info["beta"], 2)))
st.write("## Prices")
st.line_chart(stock.prices[["Open", "High", "Low", "Close"]])
st.line_chart(stock.prices["Volume"])
if interval != "1h":
try:
stock.prices.index = stock.prices.index.date
except AttributeError:
pass
st.dataframe(stock.prices)
# st.write(get_info(stock=stock))
news: list[dict] = get_news(stock=stock)
st.write("## Related news:")
for n in news:
st.markdown(f"[{n.get('title')}]({n.get('link')})") | 2,527 |
def plot_sparsity(results):
"""Function to visualize the sparsity-accuracy trade-off of regularized decision
layers
Args:
results (dictionary): Appropriately formatted dictionary with regularization
paths and logs of train/val/test accuracy.
"""
if type(results['metrics']['acc_train'].values[0]) == list:
all_tr = 100 * np.array(results['metrics']['acc_train'].values[0])
all_val = 100 * np.array(results['metrics']['acc_val'].values[0])
all_te = 100 * np.array(results['metrics']['acc_test'].values[0])
else:
all_tr = 100 * np.array(results['metrics']['acc_train'].values)
all_val = 100 * np.array(results['metrics']['acc_val'].values)
all_te = 100 * np.array(results['metrics']['acc_test'].values)
fig, axarr = plt.subplots(1, 2, figsize=(14, 5))
axarr[0].plot(all_tr)
axarr[0].plot(all_val)
axarr[0].plot(all_te)
axarr[0].legend(['Train', 'Val', 'Test'], fontsize=16)
axarr[0].set_ylabel("Accuracy (%)", fontsize=18)
axarr[0].set_xlabel("Regularization index", fontsize=18)
num_features = results['weights'][0].shape[1]
total_sparsity = np.mean(results['sparsity'], axis=1) / num_features
axarr[1].plot(total_sparsity, all_tr, 'o-')
axarr[1].plot(total_sparsity, all_te, 'o-')
axarr[1].legend(['Train', 'Test'], fontsize=16)
axarr[1].set_ylabel("Accuracy (%)", fontsize=18)
axarr[1].set_xlabel("1 - Sparsity", fontsize=18)
axarr[1].set_xscale('log')
plt.show() | 2,528 |
def warning_test():
"""For testing warning function."""
# Should show warnings in order and only HAPIWarning {1,2} should
# have a different format
from warnings import warn
warn('Normal warning 1')
warn('Normal warning 2')
warning('HAPI Warning 1')
warning('HAPI Warning 2')
warn('Normal warning 3')
warn('Normal warning 4') | 2,529 |
def valida_cnpj(cnpj):
"""
Validates CNPJs, returning only the validated digit string.
# Invalid CNPJs
>>> valida_cnpj('abcdefghijklmn')
False
>>> valida_cnpj('123')
False
>>> valida_cnpj('')
False
>>> valida_cnpj(None)
False
>>> valida_cnpj('12345678901234')
False
>>> valida_cnpj('11222333000100')
False
# Valid CNPJs
>>> valida_cnpj('11222333000181')
'11222333000181'
>>> valida_cnpj('11.222.333/0001-81')
'11222333000181'
>>> valida_cnpj(' 11 222 333 0001 81 ')
'11222333000181'
"""
cnpj = ''.join(re.findall(r'\d', str(cnpj)))
if (not cnpj) or (len(cnpj) < 14):
return False
# Take only the first 12 digits of the CNPJ and
# generate the 2 missing check digits
inteiros = list(map(int, cnpj))
novo = inteiros[:12]
prod = [5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2]
while len(novo) < 14:
r = sum([x*y for (x, y) in zip(novo, prod)]) % 11
if r > 1:
f = 11 - r
else:
f = 0
novo.append(f)
prod.insert(0, 6)
# If the generated number matches the original number, the CNPJ is valid
if novo == inteiros:
return cnpj
return False | 2,530 |
def km_miles(kilometers):
"""Usage: Convert kilometers to miles"""
return kilometers/1.609 | 2,531 |
def create_remote(accessory):
"""Define characteristics for a button (that is inn a group)."""
service_label = accessory.add_service(ServicesTypes.SERVICE_LABEL)
char = service_label.add_char(CharacteristicsTypes.SERVICE_LABEL_NAMESPACE)
char.value = 1
for i in range(4):
button = accessory.add_service(ServicesTypes.STATELESS_PROGRAMMABLE_SWITCH)
button.linked.append(service_label)
char = button.add_char(CharacteristicsTypes.INPUT_EVENT)
char.value = 0
char.perms = ["pw", "pr", "ev"]
char = button.add_char(CharacteristicsTypes.NAME)
char.value = f"Button {i + 1}"
char = button.add_char(CharacteristicsTypes.SERVICE_LABEL_INDEX)
char.value = i
battery = accessory.add_service(ServicesTypes.BATTERY_SERVICE)
battery.add_char(CharacteristicsTypes.BATTERY_LEVEL) | 2,532 |
def is_enterprise_learner(user):
"""
Check if the given user belongs to an enterprise. Cache the value if an enterprise learner is found.
Arguments:
user (User): Django User object.
Returns:
(bool): True if given user is an enterprise learner.
"""
cached_is_enterprise_key = get_is_enterprise_cache_key(user.id)
if cache.get(cached_is_enterprise_key):
return True
if EnterpriseCustomerUser.objects.filter(user_id=user.id).exists():
# Cache the enterprise user for one hour.
cache.set(cached_is_enterprise_key, True, 3600)
return True
return False | 2,533 |
def point_cloud(depth, colors):
"""Transform a depth image into a point cloud with one point for each
pixel in the image, using the camera transform for a camera
centred at cx, cy with field of view fx, fy.
depth is a 2-D ndarray with shape (rows, cols) containing
depths from 1 to 254 inclusive. The result is a 3-D array with
shape (rows, cols, 3). Pixels with invalid depth in the input have
NaN for the z-coordinate in the result.
"""
rows, cols = depth.shape
c, r = np.meshgrid(np.arange(cols), np.arange(rows), sparse=True)
valid = (depth > 0) & (depth < 255)
z = np.where(valid, depth / 256.0, np.nan)
x = np.where(valid, z * (c - cx) / fx, 0)
y = np.where(valid, z * (r - cy) / fy, 0)
points = np.dstack((x, y, z))
print('points:{}, colors:{}'.format(np.shape(points), np.shape(colors)))
reflect_matrix = np.identity(3) # reflect on x axis
reflect_matrix[0] *= -1
points = np.matmul(points, reflect_matrix)
out_fn = 'point_cloud.ply'
# filter by min disparity
mask = disparity > disparity.min()
out_points = points[mask]
out_colors = colors[mask]
idx = np.fabs(out_points[:, -1]) < 50 # 10.5 # filter by dimension
print('out_points:{}'.format(np.shape(out_points)))
out_points = out_points[idx]
out_colors = out_colors.reshape(-1, 3)
out_colors = out_colors[idx]
write_ply(out_fn, out_points, out_colors)
# reproject on the image -----------------------------------
reflected_pts = np.matmul(out_points, reflect_matrix)
projected_img, _ = cv2.projectPoints(reflected_pts, np.identity(3), np.array([0., 0., 0.]), K_left, D_left)
projected_img = projected_img.reshape(-1, 2)
blank_img = np.zeros(colors.shape, 'uint8')
img_colors = colors[mask][idx].reshape(-1, 3)
for i, pt in enumerate(projected_img):
pt_x = int(pt[0])
pt_y = int(pt[1])
if pt_x > 0 and pt_y > 0:
# use the BGR format to match the original image type
col = (int(img_colors[i, 2]), int(img_colors[i, 1]), int(img_colors[i, 0]))
cv2.circle(blank_img, (pt_x, pt_y), 1, col)
return blank_img, out_points | 2,534 |
def dict_merge(dct, merge_dct):
""" Recursive dict merge. Inspired by :meth:``dict.update()``, instead of
updating only top-level keys, dict_merge recurses down into dicts nested
to an arbitrary depth, updating keys. The ``merge_dct`` is merged into
``dct``.
:param dct: dict onto which the merge is executed
:param merge_dct: dct merged into dct
:return: None
source: https://gist.github.com/angstwad/bf22d1822c38a92ec0a9
"""
import collections
for k, v in merge_dct.items():
if (k in dct and isinstance(dct[k], dict)
and isinstance(merge_dct[k], collections.abc.Mapping)):
dict_merge(dct[k], v)
else:
dct[k] = v | 2,535 |
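A small usage sketch with the function above in scope; note that it mutates `dct` in place and returns None:

dct = {'a': 1, 'nested': {'x': 1, 'y': 2}}
dict_merge(dct, {'nested': {'y': 20, 'z': 30}, 'b': 2})
print(dct)
# -> {'a': 1, 'nested': {'x': 1, 'y': 20, 'z': 30}, 'b': 2}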
def structural_email(data, pos_parser=True, bytedata_parser_threshold=50, reference_parser_match_type=2):
"""
This is a parser pipeline; parser order matters.
1. string => split the structured email into => header, body, others
2. body => remove typos and some irrelevant words => body
3. body => parse and remove email from body => body_no_email
4. body_no_email => parse and remove binary data like BMP or picture from body => body_no_binary_no_email
5. body_no_binary_no_email => separate email reference and reply => reply, previous_one, previous_two
@param data: data text series including all the training set or test set
@return: structural information
"""
print("Preprocessing for unstructure email...")
header_info = []
body_info = []
others_info = []
tag_info = []
for string in tqdm(data):
# structure parsers
header, body, others = structure_parser(string)
body = typo_parser(body)
body_no_email, emails = email_address_parser(body)
body_no_binary_no_email, bytedata = bytedata_parser(body_no_email, threshold=bytedata_parser_threshold)
# main parser
reply, previous_one, previous_two = reference_parser(body_no_binary_no_email, match_type=reference_parser_match_type)
if pos_parser:
target_tag = set(['NN', 'NNS', 'NNPS'])
tag_reply = pos_tag_parser(reply, target_tag)
tag_previous_one = pos_tag_parser(previous_one, target_tag)
tag_previous_two = pos_tag_parser(previous_two, target_tag)
tag_info.append([tag_reply, tag_previous_one, tag_previous_two])
# append data in loops
header_info.append(header)
body_info.append([reply, previous_one, previous_two])
others_info.append(others + [emails] + [bytedata])
a1 = pd.DataFrame.from_dict(header_info)
a2 = pd.DataFrame(body_info, columns=["reply", "reference_one", "reference_two"])
a3 = pd.DataFrame(others_info, columns=["date", "delivered_to", "to_domains", "error_message", "contained_emails", "long_string"])
if pos_parser:
a4 = pd.DataFrame(tag_info, columns=["tag_reply", "tag_reference_one", "tag_reference_two"])
structure_email = pd.concat([a1, a2, a3, a4], axis=1)
else:
structure_email = pd.concat([a1, a2, a3], axis=1)
return structure_email | 2,536 |
def build_batches(data, conf, turn_cut_type='tail', term_cut_type='tail'):
"""
Build batches
"""
_turns_batches = []
_tt_turns_len_batches = []
_every_turn_len_batches = []
_response_batches = []
_response_len_batches = []
_label_batches = []
batch_len = len(data[six.b('y')]) // conf['batch_size']
for batch_index in six.moves.range(batch_len):
_turns, _tt_turns_len, _every_turn_len, _response, _response_len, _label = build_one_batch(
data, batch_index, conf, turn_cut_type=turn_cut_type, term_cut_type=term_cut_type)
_turns_batches.append(_turns)
_tt_turns_len_batches.append(_tt_turns_len)
_every_turn_len_batches.append(_every_turn_len)
_response_batches.append(_response)
_response_len_batches.append(_response_len)
_label_batches.append(_label)
ans = {
"turns": _turns_batches,
"tt_turns_len": _tt_turns_len_batches,
"every_turn_len": _every_turn_len_batches,
"response": _response_batches,
"response_len": _response_len_batches,
"label": _label_batches
}
return ans | 2,537 |
def is_notebook():
"""Check if pyaedt is running in Jupyter or not.
Returns
-------
bool
"""
try:
shell = get_ipython().__class__.__name__
if shell == "ZMQInteractiveShell":
return True # Jupyter notebook or qtconsole
else:
return False
except NameError:
return False | 2,538 |
def test_handle_check_suite(monkeypatch, capsys):
"""
.
"""
checks.handle_check_suite({"action": "in_progress"}, "abc")
printed = capsys.readouterr()
assert (
printed.out == "Ignoring check_suite action in_progress\n"
), "In progress is skipped"
# pylint: disable=unused-argument
@counter_wrapper
def mock_check_trigger_deploy(form: dict, access_token: str):
"""
.
"""
monkeypatch.setattr(checks, "check_trigger_deploy", mock_check_trigger_deploy)
checks.handle_check_suite({"action": "completed"}, "abc")
assert (
mock_check_trigger_deploy.counter == 1
), "Checked if a deploy needed to be triggered"
monkeypatch.setattr(
api,
"send_github_request",
lambda *args, **kwargs: Response({"message": "failed"}, 403),
)
@counter_wrapper
def create_check(check, cursor=None):
"""
.
"""
assert check.status == "pending", "Check status correct"
@counter_wrapper
def update_check(log, cursor=None):
"""
.
"""
assert log.status == "failure", "Log should fail"
assert log.message == "Failed to create a check run", "Log has expected message"
@counter_wrapper
def add_check_log(log, cursor=None):
"""
.
"""
assert log.status == "log", "Is a log"
assert log.message == "Check run scheduled", "Log has expected message"
@counter_wrapper
def update_commit_status(
url: str, sha: str, access_token: str, status: str, description: str
):
"""
.
"""
assert status == "failure", "Commit should have failed"
monkeypatch.setattr(db, "create_check", create_check)
monkeypatch.setattr(db, "update_check_status", update_check)
monkeypatch.setattr(db, "add_check_log", add_check_log)
monkeypatch.setattr(api, "set_commit_status", update_commit_status)
checks.handle_check_suite(
{
"action": "requested",
"check_suite": {"head_sha": "123abc", "app": {"external_url": "/app"}},
"repository": {"url": "/", "statuses_url": "/statuses"},
},
"abc",
)
printed = capsys.readouterr()
assert printed.out == (
"Requesting a check\n"
"Failed to schedule check: got status 403!\n"
"{'message': 'failed'}\n"
), "Failed to schedule check printed"
assert update_commit_status.counter == 1, "Commit status updated"
assert create_check.counter == 1, "Check created"
assert update_check.counter == 1, "Check status updated"
# pylint: disable=unused-argument
monkeypatch.setattr(
api,
"send_github_request",
lambda *args, **kwargs: Response({"message": "scheduled"}, 201),
)
checks.handle_check_suite(
{
"action": "requested",
"check_suite": {"head_sha": "123abc", "app": {"external_url": "/app"}},
"repository": {"url": "/"},
},
"abc",
)
printed = capsys.readouterr()
assert printed.out == (
"Requesting a check\n" "Check requested successfully\n"
), "Success output printed"
assert create_check.counter == 2, "Check created"
assert add_check_log.counter == 1, "Added check log" | 2,539 |
def strategy(history, memory):
"""
Tit-for-tat, except we punish them N times in a row if this is the Nth time they've
initiated a defection.
memory: (initiatedDefections, remainingPunitiveDefections)
"""
if memory is not None and memory[1] > 0:
choice = 0
memory = (memory[0], memory[1] - 1)
return choice, memory
num_rounds = history.shape[1]
opponents_last_move = history[1, -1] if num_rounds >= 1 else 1
our_last_move = history[0, -1] if num_rounds >= 1 else 1
our_second_last_move = history[0, -2] if num_rounds >= 2 else 1
opponent_initiated_defection = (
opponents_last_move == 0 and our_last_move == 1 and our_second_last_move == 1
)
choice = 0 if opponent_initiated_defection else 1
if choice == 0:
memory = (1, 0) if memory is None else (memory[0] + 1, memory[0])
return choice, memory | 2,540 |
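A toy check of the escalating punishment logic, assuming the tournament convention visible in the code: `history` has shape (2, num_rounds) with row 0 holding our moves, row 1 the opponent's, and 1 = cooperate, 0 = defect:

import numpy as np

# Both sides cooperated in round 1; the opponent defected in round 2.
history = np.array([[1, 1],   # our moves
                    [1, 0]])  # opponent's moves
choice, memory = strategy(history, None)
print(choice, memory)  # -> 0 (1, 0): defect once to punish their first initiated defection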
def get_primitives(name=None, primitive_type=None, primitive_subtype=None):
"""Get a list of the available primitives.
Optionally filter by primitive type: ``transformation`` or ``aggregation``.
Args:
name (str):
Name or keyword to search for. Defaults to ``sigpro``.
primitive_type (str):
Filter by primitive type. ``transformation`` or ``aggregation``.
primitive_subtype (str):
Filter by primitive subtype. ``amplitude``, ``frequency`` or ``frequency_time``.
Returns:
list:
List of the names of the available primitives.
"""
filters = {}
if primitive_type:
if primitive_type not in ('transformation', 'aggregation'):
raise ValueError('primitive_type must be `transformation` or `aggregation`.')
filters['classifiers.type'] = primitive_type
if primitive_subtype:
if primitive_subtype not in ('amplitude', 'frequency', 'frequency_time'):
raise ValueError(
'primitive_subtype must be `amplitude`, `frequency` or `frequency_time`.')
filters['classifiers.subtype'] = primitive_subtype
return discovery.find_primitives(name or 'sigpro', filters) | 2,541 |
def _unpack_batch_channel(data, old_shape):
"""Unpack the data channel dimension.
"""
data = nnvm.sym.transpose(data, axes=(0, 4, 1, 5, 2, 3))
data = nnvm.sym.reshape(data, shape=old_shape)
return data | 2,542 |
def test_atomic_language_length_nistxml_sv_iv_atomic_language_length_1_3(mode, save_output, output_format):
"""
Type atomic/language is restricted by facet length with value 2.
"""
assert_bindings(
schema="nistData/atomic/language/Schema+Instance/NISTSchema-SV-IV-atomic-language-length-1.xsd",
instance="nistData/atomic/language/Schema+Instance/NISTXML-SV-IV-atomic-language-length-1-3.xml",
class_name="NistschemaSvIvAtomicLanguageLength1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
) | 2,543 |
def get_branch_user(branch):
"""Get user name for given branch."""
with Command('git', 'log', '--pretty=tformat:%an', '-1', branch) as cmd:
for line in cmd:
return line | 2,544 |
def log_handler(*loggers, logname: str = ''):
"""[summary]
Keyword Arguments:
logname {str} -- [description] (default: {''})
"""
formatter = logging.Formatter(
'%(asctime)s %(filename)12s:L%(lineno)3s [%(levelname)8s] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
# stream handler
shell_handler = logging.StreamHandler(sys.stdout)
shell_handler.setLevel(logging.INFO)
shell_handler.setFormatter(formatter)
# file handler
if logname:
file_handler = logging.FileHandler(logname)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
for logger in loggers:
if logname:
logger.addHandler(file_handler)
logger.addHandler(shell_handler)
logger.setLevel(logging.DEBUG) | 2,545 |
def consume_fio_output(cons, result, numjobs, mode, bs, env_id):
"""Consumer function."""
cpu_utilization_vmm = result[CPU_UTILIZATION_VMM]
cpu_utilization_vcpus = result[CPU_UTILIZATION_VCPUS_TOTAL]
cons.consume_measurement(CPU_UTILIZATION_VMM, cpu_utilization_vmm)
cons.consume_measurement(CPU_UTILIZATION_VCPUS_TOTAL,
cpu_utilization_vcpus)
read_values(cons, numjobs, env_id, mode, bs, "iops")
read_values(cons, numjobs, env_id, mode, bs, "bw") | 2,546 |
def find_system_cameras() -> Mapping[str, str]:
"""Returns a camera_description -> camera_path map."""
if sys.platform == 'darwin' or sys.platform in ('windows', 'win32'):
try:
from .qtmultimedia import find_system_cameras
except ImportError as e:
return {}
else:
return find_system_cameras()
else: # desktop Linux and similar
from electrum_cintamani import qrscanner
return qrscanner.find_system_cameras() | 2,547 |
def _shape_list(x):
"""Return list of dims, statically where possible."""
static = x.get_shape().as_list()
shape = tf.shape(x)
ret = []
for i, static_dim in enumerate(static):
dim = static_dim or shape[i]
ret.append(dim)
return ret | 2,548 |
def test_held_karp_ascent():
"""
Test the Held-Karp relaxation with the ascent method
"""
import networkx.algorithms.approximation.traveling_salesman as tsp
np = pytest.importorskip("numpy")
# Adjacency matrix from page 1153 of the 1970 Held and Karp paper
# which have been edited to be directional, but also symmetric
G_array = np.array(
[
[0, 97, 60, 73, 17, 52],
[97, 0, 41, 52, 90, 30],
[60, 41, 0, 21, 35, 41],
[73, 52, 21, 0, 95, 46],
[17, 90, 35, 95, 0, 81],
[52, 30, 41, 46, 81, 0],
]
)
solution_edges = [(1, 3), (2, 4), (3, 2), (4, 0), (5, 1), (0, 5)]
G = nx.from_numpy_array(G_array, create_using=nx.DiGraph)
opt_hk, z_star = tsp.held_karp_ascent(G)
# Check that the optimal weights are the same
assert round(opt_hk, 2) == 207.00
# Check that the z_stars are the same
solution = nx.DiGraph()
solution.add_edges_from(solution_edges)
assert nx.utils.edges_equal(z_star.edges, solution.edges) | 2,549 |
def upgrade_common(ctx, config, deploy_style):
"""
Common code for upgrading
"""
remotes = upgrade_remote_to_config(ctx, config)
project = config.get('project', 'ceph')
extra_pkgs = config.get('extra_packages', [])
log.info('extra packages: {packages}'.format(packages=extra_pkgs))
for remote, node in remotes.items():
system_type = teuthology.get_system_type(remote)
assert system_type in ('deb', 'rpm')
pkgs = get_package_list(ctx, config)[system_type]
log.info("Upgrading {proj} {system_type} packages: {pkgs}".format(
proj=project, system_type=system_type, pkgs=', '.join(pkgs)))
if isinstance(extra_pkgs, dict):
pkgs += extra_pkgs.get(system_type, [])
else:
pkgs += extra_pkgs
installed_version = packaging.get_package_version(remote, 'ceph-common')
upgrade_version = get_upgrade_version(ctx, node, remote)
log.info("Ceph {s} upgrade from {i} to {u}".format(
s=system_type,
i=installed_version,
u=upgrade_version
))
if _upgrade_is_downgrade(installed_version, upgrade_version):
raise RuntimeError(
"An attempt to upgrade from a higher version to a lower one "
"will always fail. Hint: check tags in the target git branch."
)
deploy_style(ctx, node, remote, pkgs, system_type)
verify_package_version(ctx, node, remote)
return len(remotes) | 2,550 |
def nested_put(config: Dict[str, Any], nested_keys: List[str], value: Any) -> None:
"""
Puts the given nested key value pair into the given dict. If any part of
the nested key structure does not yet exist, then it will be created in the
process.
>>> config = {}
>>> nested_put(config, ["key"], "value")
>>> config["key"]
'value'
>>> config = {}
>>> nested_put(config, ["settings", "gyre_mp_threads"], 2)
>>> config["settings"]["gyre_mp_threads"]
2
"""
if len(nested_keys) == 0:
raise Exception("Invalid number of nested keys.")
if len(nested_keys) == 1:
config[nested_keys[0]] = value
else:
next_key = nested_keys[0]
if next_key not in config:
config[next_key] = {}
nested_put(config[next_key], nested_keys[1:], value) | 2,551 |
def convert2define(name):
"""
returns the name of the define used according to 'name' which is the name of the file
"""
header = toupper(toalphanum(name))
return "__" + header + "__" | 2,552 |
def url_exists(url):
"""
Checks if a url exists
:param url:
:return:
"""
p = urlparse(url)
conn = httplib.HTTPConnection(p.netloc)
conn.request('HEAD', p.path)
resp = conn.getresponse()
return resp.status == 301 or resp.status == 200 | 2,553 |
def add_header(cmd):
"""
:param cmd: the command with its values
:return: adds a header and returns it, ready to be send
"""
# build the header from len(cmd), then pad it with spaces up to HEADERSIZE
header = str(len(cmd))
for i in range(get_digits(len(cmd)), HEADERSIZE):
header = header + " "
return header + cmd | 2,554 |
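A worked illustration of the fixed-width framing, assuming `HEADERSIZE = 10` and a `get_digits` helper that returns the number of decimal digits; neither is shown above, so both definitions below are assumptions standing in for the module's own:

HEADERSIZE = 10

def get_digits(n):
    # Assumed helper: number of decimal digits in n.
    return len(str(n))

framed = add_header("HELLO")   # len("HELLO") == 5 -> header "5" padded to 10 chars
print(repr(framed))            # '5         HELLO'
print(len(framed))             # 15 (HEADERSIZE + len(cmd))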
def stat_selector(player, stat, in_path, year):
"""
Selects stat for player in game year selected
Parameters
----------
player
The player being assessed (str)
stat
The stat being assessed (str)
in_path
The path to the folder containing player data (str)
year
The year of game to look at (int)
Returns
-------
stat_selected
A number indicating the selected stat value (int)
"""
df = fifa_file_opener(in_path, year)
player_row = df.loc[df["short_name"] == player]
stat_selected = int(player_row[stat])
return stat_selected | 2,555 |
def info(text, newline=True):
""" write the text to standard error followed by a newline """
sys.stderr.write("%s%s" % (text, "\n" if newline else "")) | 2,556 |
def test_iou_metric_compute(
outputs: List[torch.Tensor],
targets: List[torch.Tensor],
weights: List[float],
class_names: List[str],
batch_answers: List[List[float]],
total_answers: List[List[Union[List[float], float]]],
):
"""IOU update, compute test"""
metric = IOUMetric(weights=weights, class_names=class_names)
for output, target, batch_answer, total_answer in zip(
outputs, targets, batch_answers, total_answers
):
batch_score = metric.update(output, target)
total_score = metric.compute()
assert len(batch_answer) == len(batch_score)
for pred, answer in zip(batch_score, batch_answer):
assert abs(pred - answer) < EPS
assert len(total_answer) == len(total_score)
for pred, answer in zip(total_score, total_answer):
if isinstance(pred, list):
for pred_sample, answer_sample in zip(pred, answer):
assert abs(pred_sample - answer_sample) < EPS
else:
assert abs(pred - answer) < EPS | 2,557 |
def get_cali_samples(train_data_loader, num_samples, no_label=True):
"""Generate sub-dataset for calibration.
Args:
train_data_loader (torch.utils.data.DataLoader):
num_samples (int):
no_label (bool, optional): If the dataloader has no labels. Defaults to True.
Returns:
torch.Tensor: Concatenated data matrix.
"""
cali_data_list = []
if no_label:
for batch_data in train_data_loader:
cali_data_list.append(batch_data["image"])
if len(cali_data_list) >= num_samples:
break
else:
for batch_data, _ in train_data_loader:
cali_data_list.append(batch_data)
if len(cali_data_list) >= num_samples:
break
return torch.cat(cali_data_list, dim=0)[:num_samples].cpu() | 2,558 |
def visualize(args, epoch, model, data_loader, writer):
"""
Logs visualisations of reconstructions to Tensorboard.
:param args: Arguments object, contains reconstruction model hyperparameters.
:param epoch: current training epoch.
:param model: reconstruction model.
:param data_loader: visualisation data loader.
:param writer: Tensorboard writer.
"""
def save_image(image, tag):
image -= image.min()
image /= image.max()
grid = torchvision.utils.make_grid(image, nrow=4, pad_value=1)
writer.add_image(tag, grid, epoch)
model.train()
with torch.no_grad():
for iter, data in enumerate(data_loader):
_, _, _, input, target, _, _, _, _ = data
input = input.unsqueeze(1).to(args.device)
target = target.unsqueeze(1).to(args.device)
recon = model(input)
save_image(target, 'Target')
save_image(recon, 'Reconstruction')
save_image(torch.abs(target - recon), 'Error')
break | 2,559 |
def fetch_available_litteraturbanken_books() -> List[Tuple[str, str]]:
"""Fetch available books from Litteraturbanken."""
url = "https://litteraturbanken.se/api/list_all/etext?exclude=text,parts,sourcedesc,pages,errata&filter_and=%7B%22sort_date_imprint.date:range%22:%221248,2020%22,%22export%3Etype%22:%5B%22xml%22,%22txt%22,%22workdb%22%5D%7D&filter_or=%7B%7D&filter_string=&from=0&include=lbworkid,titlepath,title,titleid,work_titleid,shorttitle,mediatype,searchable,imported,sortfield,sort_date_imprint.plain,main_author.authorid,main_author.surname,main_author.type,work_authors.authorid,work_authors.surname,startpagename,has_epub,sort_date.plain,export&partial_string=true&sort_field=popularity%7Cdesc&suggest=true&to=1000"
response = requests.get(url)
response.raise_for_status()
response = json.loads(response.text)
books = []
for book in response["data"]:
has_text = False
for export in book["export"]:
if export["type"] == "txt":
has_text = True
break
if not has_text:
continue
filename = "LB_{}_{}_{}_etext.txt".format(book["main_author"]["authorid"], book["titleid"], book["sort_date_imprint"]["plain"])
if filename in blacklist:
continue
books.append((filename, book["lbworkid"]))
return books | 2,560 |
def soup_from_psf(psf):
"""
Returns a Soup from a .psf file
"""
soup = pdbatoms.Soup()
curr_res_num = None
is_header = True
for line in open(psf):
if is_header:
if "NATOM" in line:
n_atom = int(line.split()[0])
is_header = False
continue
words = line.split()
atom_num = int(words[0])
chain_id = words[1]
res_num = int(words[2])
res_type = words[3]
atom_type = words[4]
charge = float(words[6])
mass = float(words[7])
if chain_id.startswith('WT') or chain_id.startswith('ION'):
is_hetatm = True
chain_id = " "
else:
is_hetatm = False
chain_id = chain_id[0]
if curr_res_num != res_num:
res = pdbatoms.Residue(res_type, chain_id, res_num)
soup.append_residue(res)
curr_res_num = res_num
atom = pdbatoms.Atom()
atom.vel = v3.vector()
atom.chain_id = chain_id
atom.is_hetatm = is_hetatm
atom.num = atom_num
atom.res_num = res_num
atom.res_type = res_type
atom.type = atom_type
atom.mass = mass
atom.charge = charge
atom.element = data.guess_element(res_type, atom_type)
soup.insert_atom(-1, atom)
if len(soup.atoms()) == n_atom:
break
convert_to_pdb_atom_names(soup)
return soup | 2,561 |
def check_api():
"""
Recheck and confirm goods stock-in (receiving check).
post req: withlock
{
erp_order_code,
lines: [{
barcode, location, lpn, qty
},]
w_user_code,
w_user_name
}
"""
w_user_code = request.json.pop('w_user_code', None)
w_user_name = request.json.pop('w_user_name', None)
order = Stockin.query.t_query.filter_by(erp_order_code=request.json.pop('erp_order_code')) \
.with_for_update().first()
if order.state == 'create' or order.state == 'part':
lines = request.json['lines']
action = StockinAction(order)
for line in lines:
line['qty'] = int(line.get('qty', 0) or 0)
if line.get('qty', 0) <= 0:
continue
action.check(order=order, w_user_code=w_user_code, w_user_name=w_user_name, **line)
order.state = 'part'
# If over-receiving is not allowed, check after each receipt whether the order is fully stocked in; if over-receiving is allowed, the order can only be closed manually
if not g.owner.is_overcharge:
finish = True
for line in order.lines:
if not (line.qty_real >= line.qty):
finish = False
order.state = 'all' if finish else 'part'
if order.state == 'all':
order.finish()
db.session.commit()
return json_response({'status': 'success', 'msg': u'ok', 'data': order.as_dict})
db.session.rollback()
return json_response({'status': 'fail', 'msg': u'Order is in state (%s) and cannot receive any more goods' % (order.state), 'data': order.as_dict})
def test_qy_assert_():
"""
Test the qy-LLVM assert_() construct.
"""
# should not raise
@emit_and_execute()
def _():
qy.assert_(True)
# should raise
from qy import EmittedAssertionError
def should_raise():
@emit_and_execute()
def _():
qy.assert_(False)
assert_raises(EmittedAssertionError, should_raise) | 2,563 |
def test_integration_download_fail(demisto_client, tmp_path):
"""
Given
- Script to download, that exists on the machine.
- Playbook to download, that doesn't exist on the machine.
When
- Running demisto-sdk download command.
Then
- Ensure that the exit code is 1, since the playbook was not downloaded.
"""
env = Environment(tmp_path)
pack_path = join(DEMISTO_SDK_PATH, env.PACK_INSTANCE_PATH)
runner = CliRunner(mix_stderr=False)
result = runner.invoke(main, [DOWNLOAD_COMMAND, "-o", pack_path, "-i", "TestScript", "-i", "DummyPlaybook1", "-f"])
assert "FILE NAME REASON" in result.output[350:1050]
assert "----------- ---------------------------------------" in result.output
assert 'DummyPlaybook1 File does not exist in Demisto instance' in result.output
assert result.exit_code == 1 | 2,564 |
def quadratic_form(u, Q, v, workers=1, **kwargs):
"""
Compute the quadratic form uQv, with broadcasting
Parameters
----------
u : (..., M) array
The u vectors of the quadratic form uQv
Q : (..., M, N) array
The Q matrices of the quadratic form uQv
v : (..., N) array
The v vectors of the quadratic form uQv
workers : int, optional
The number of parallel threads to use along gufunc loop dimension(s).
If set to -1, the maximum number of threads (as returned by
``multiprocessing.cpu_count()``) are used.
Returns
-------
qf : (...) array
The result of the quadratic forms
Notes
-----
Numpy broadcasting rules apply.
Implemented for types single, double, csingle and cdouble. Numpy
conversion rules apply.
This is similar to PDL inner2
Examples
--------
The result in absence of broadcasting is just as np.dot(np.dot(u,Q),v)
or np.dot(u, np.dot(Q,v))
>>> u = np.array([2., 3.])
>>> Q = np.array([[1.,1.], [0.,1.]])
>>> v = np.array([1.,2.])
>>> quadratic_form(u,Q,v)
12.0
>>> np.dot(np.dot(u,Q),v)
12.0
>>> np.dot(u, np.dot(Q,v))
12.0
"""
with _setup_gulinalg_threads(workers):
out = _impl.quadratic_form(u, Q, v, **kwargs)
return out | 2,565 |
def reorg(dat):
"""This function grabs the data from the dictionary of data types
(organized by ID), and combines them into the
:class:`dolfyn.ADPdata` object.
"""
outdat = apb.ADPdata()
cfg = outdat['config'] = db.config(_type='Nortek AD2CP')
cfh = cfg['filehead config'] = dat['filehead config']
cfg['model'] = (cfh['ID'].split(',')[0][5:-1])
outdat['props'] = {}
outdat['props']['inst_make'] = 'Nortek'
outdat['props']['inst_model'] = cfg['model']
outdat['props']['inst_type'] = 'ADP'
for id, tag in [(21, ''), (24, '_b5'), (26, '_ar')]:
if id == 26:
collapse_exclude = [0]
else:
collapse_exclude = []
if id not in dat:
continue
dnow = dat[id]
cfg['burst_config' + tag] = lib.headconfig_int2dict(
lib.collapse(dnow['config'], exclude=collapse_exclude,
name='config'))
outdat['mpltime' + tag] = lib.calc_time(
dnow['year'] + 1900,
dnow['month'],
dnow['day'],
dnow['hour'],
dnow['minute'],
dnow['second'],
dnow['usec100'].astype('uint32') * 100)
tmp = lib.beams_cy_int2dict(
lib.collapse(dnow['beam_config'], exclude=collapse_exclude,
name='beam_config'), 21)
cfg['ncells' + tag] = tmp['ncells']
cfg['coord_sys' + tag] = tmp['cy']
cfg['nbeams' + tag] = tmp['nbeams']
for ky in ['SerialNum', 'cell_size', 'blanking',
'nom_corr', 'data_desc',
'vel_scale', 'power_level']:
# These ones should 'collapse'
# (i.e., all values should be the same)
# So we only need that one value.
cfg[ky + tag] = lib.collapse(dnow[ky], exclude=collapse_exclude,
name=ky)
for ky in ['c_sound', 'temp', 'press',
'heading', 'pitch', 'roll',
'temp_press', 'batt_V',
'temp_mag', 'temp_clock',
'mag', 'accel',
'ambig_vel', 'xmit_energy',
'error', 'status0', 'status',
'_ensemble', 'ensemble']:
# No if statement here
outdat[ky + tag] = dnow[ky]
for ky in [
'vel', 'amp', 'corr',
'alt_dist', 'alt_quality', 'alt_status',
'ast_dist', 'ast_quality', 'ast_offset_time',
'ast_pressure',
'altraw_nsamp', 'altraw_dist', 'altraw_samp',
'echo',
'orientmat', 'angrt',
'percent_good',
'std_pitch', 'std_roll', 'std_heading', 'std_press'
]:
if ky in dnow:
outdat[ky + tag] = dnow[ky]
for grp, keys in defs._burst_group_org.items():
if grp not in outdat and \
len(set(defs._burst_group_org[grp])
.intersection(outdat.keys())):
outdat[grp] = db.TimeData()
for ky in keys:
if ky == grp and ky in outdat and \
not isinstance(outdat[grp], db.TimeData):
tmp = outdat.pop(grp)
outdat[grp] = db.TimeData()
outdat[grp][ky] = tmp
#print(ky, tmp)
if ky + tag in outdat and not \
isinstance(outdat[ky + tag], db.TimeData):
outdat[grp][ky + tag] = outdat.pop(ky + tag)
# Move 'altimeter raw' data to it's own down-sampled structure
if 26 in dat:
ard = outdat['altraw'] = db.MappedTime()
for ky in list(outdat.iter_data(include_hidden=True)):
if ky.endswith('_ar'):
grp = ky.split('.')[0]
if '.' in ky and grp not in ard:
ard[grp] = db.TimeData()
ard[ky.rstrip('_ar')] = outdat.pop(ky)
N = ard['_map_N'] = len(outdat['mpltime'])
parent_map = np.arange(N)
ard['_map'] = parent_map[np.in1d(outdat.sys.ensemble, ard.sys.ensemble)]
outdat['config']['altraw'] = db.config(_type='ALTRAW', **ard.pop('config'))
outdat.props['coord_sys'] = {'XYZ': 'inst',
'ENU': 'earth',
'BEAM': 'beam'}[cfg['coord_sys'].upper()]
tmp = lib.status2data(outdat.sys.status) # returns a dict
outdat.orient['orient_up'] = tmp['orient_up']
# 0: XUP, 1: XDOWN, 4: ZUP, 5: ZDOWN
# Heading is: 0,1: Z; 4,5: X
return outdat | 2,566 |
def human2pickett(name: str, reduction="A", linear=True, nuclei=0):
""" Function for translating a Hamiltonian parameter to a Pickett
identifier.
An alternative way of doing this is to programmatically
generate the Pickett identifiers, and just use format string
to output the identifier.
"""
pickett_parameters = read_yaml(
os.path.expanduser("~") + "/.pyspectools/pickett_terms.yml"
)
if name is "B" and linear is True:
# Haven't thought of a clever way of doing this yet...
identifier = 100
elif name is "B" and linear is False:
identifier = 20000
else:
# Hyperfine terms
if name in ["eQq", "eQq/2"]:
identifier = str(pickett_parameters[name]).format(nuclei)
elif "D_" in name or "del" in name:
identifier = str(pickett_parameters[name][reduction])
else:
try:
identifier = pickett_parameters[name]
except KeyError:
print("Parameter name unknown!")
return identifier | 2,567 |
def findAndRemove( type, attrs, parent=None ):
"""Removes all the objects of a specific type under a specific parent that have attributes matching attrs"""
children = getFilteredTypeList(type, attrs, parent)
for child in children:
remove(child) | 2,568 |
def get_oauth2_service_account_keys():
"""A getter that returns the required OAuth2 service account keys.
Returns:
A tuple containing the required keys as strs.
"""
return _OAUTH2_SERVICE_ACCOUNT_KEYS | 2,569 |
def output(s):
"""Outputs string s as chat message.
Send the given string to the chat client.
"""
pass | 2,570 |
def read_conf_file(
toml_path: Path,
file_desc: str,
schema_type: str,
) -> Any:
"""Read TOML configuration and verify against schema."""
if not toml_path.exists():
logger.error(f'{file_desc} file "{toml_path}" does not exist')
sys.exit(1)
try:
toml_dict = toml.load(toml_path)
except TypeError:
logger.error(f'Error in {file_desc} filename "{toml_path}"')
sys.exit(1)
except toml.TomlDecodeError as e:
logger.error(f"File {toml_path} is not valid TOML:")
logger.error(e)
sys.exit(1)
if schema_type == "combine":
file_schema = COMBINE_SCHEMA
elif schema_type == "plot":
file_schema = PLOTTING_SCHEMA
else:
logger.error(f"unknown schema type {schema_type}")
sys.exit(1)
try:
validated = file_schema.validate(toml_dict)
except SchemaError as e:
logger.error(e)
sys.exit(1)
return validated | 2,571 |
def mocked_requests_post(*args, **kwargs):
"""Mock to replace requests.post"""
class MockResponse:
"""Mock class for KustoResponse."""
def __init__(self, json_data, status_code):
self.json_data = json_data
self.text = text_type(json_data)
self.status_code = status_code
self.headers = None
def json(self):
"""Get json data from response."""
return self.json_data
if args[0] == "https://somecluster.kusto.windows.net/v2/rest/query":
if "truncationmaxrecords" in kwargs["json"]["csl"]:
if json.loads(kwargs["json"]["properties"])["Options"]["deferpartialqueryfailures"]:
file_name = "query_partial_results_defer_is_true.json"
else:
file_name = "query_partial_results_defer_is_false.json"
elif "Deft" in kwargs["json"]["csl"]:
file_name = "deft.json"
with open(os.path.join(os.path.dirname(__file__), "input", file_name), "r") as response_file:
data = response_file.read()
return MockResponse(json.loads(data), 200)
elif args[0] == "https://somecluster.kusto.windows.net/v1/rest/mgmt":
if kwargs["json"]["csl"] == ".show version":
file_name = "versionshowcommandresult.json"
else:
file_name = "adminthenquery.json"
with open(os.path.join(os.path.dirname(__file__), "input", file_name), "r") as response_file:
data = response_file.read()
return MockResponse(json.loads(data), 200)
return MockResponse(None, 404) | 2,572 |
def grammar_info(df, col):
"""return three separate attributes with
clean abstract, flesh score and sentence count"""
df['clean_abstract'] = clean_text(df[col])
df['flesch_score'] = df[col].apply(flesch_score)
df['sentence_count'] = sentence_count(df[col])
return df | 2,573 |
def urbandict(bot, event, *args):
"""lookup a term on Urban Dictionary.
supplying no parameters will get you a random term.
DISCLAIMER: all definitions are from http://www.urbandictionary.com/ - the bot and its
creators/maintainers take no responsibility for any hurt feelings.
"""
term = " ".join(args)
if not term:
url = "http://www.urbandictionary.com/random.php"
else:
url = "http://www.urbandictionary.com/define.php?term=%s" % \
urlquote(term)
f = urlopen(url)
data = f.read().decode('utf-8')
urbanDictParser = UrbanDictParser()
try:
urbanDictParser.feed(data)
except IndexError:
# apparently, nothing was returned
pass
if len(urbanDictParser.translations) > 0:
html_text = ""
the_definition = urbanDictParser.translations[0]
html_text += '<b>"' + the_definition["word"] + '"</b><br /><br />'
if "def" in the_definition:
html_text += _("<b>definition:</b> ") + the_definition["def"].strip().replace("\n", "<br />") + '<br /><br />'
if "example" in the_definition:
html_text += _("<b>example:</b> ") + the_definition["example"].strip().replace("\n", "<br />")
yield from bot.coro_send_message(event.conv, html_text)
else:
if term:
yield from bot.coro_send_message(event.conv, _('<i>no urban dictionary definition for "{}"</i>').format(term))
else:
yield from bot.coro_send_message(event.conv, _('<i>no term from urban dictionary</i>')) | 2,574 |
def read_file_list(bld, file):
"""
Read and process a file list file (.waf_file) and manage duplicate files and possible globbing patterns to prepare
    the list for ingestion by the project
:param bld: The build context
:param file: The .waf_file file list to process
:return: The processed list file
"""
if not os.path.isfile(os.path.join(bld.path.abspath(), file)):
raise Errors.WafError("Invalid waf file list file: {}. File not found.".format(file))
def _invalid_alias_callback(alias_key):
error_message = "Invalid alias '{}' specified in {}".format(alias_key, file)
raise Errors.WafError(error_message)
def _alias_not_enabled_callback(alias_key, roles):
error_message = "3rd Party alias '{}' specified in {} is not enabled. Make sure that at least one of the " \
"following roles is enabled: [{}]".format(alias_key, file, ', '.join(roles))
raise Errors.WafError(error_message)
# Manage duplicate files and glob hits
dup_set = set()
glob_hits = 0
waf_file_node = bld.path.make_node(file)
waf_file_node_abs = waf_file_node.abspath()
base_path_abs = waf_file_node.parent.abspath()
if not os.path.exists(waf_file_node_abs):
raise Errors.WafError('Invalid WAF file list: {}'.format(waf_file_node_abs))
def _determine_vs_filter(input_rel_folder_path, input_filter_name, input_filter_pattern):
"""
        Calculate the VS filter based on the resulting relative path, the input filter name,
and the pattern used to derive the input relative path
"""
vs_filter = input_filter_name
if len(input_rel_folder_path) > 0:
# If the resulting relative path has a subfolder, the base the filter on the following conditions
if input_filter_name.lower()=='root':
# This is the root folder, use the relative folder subpath as the filter
vs_filter = input_rel_folder_path
else:
# This is a named filter, the filter will place all results under this filter
pattern_dirname = os.path.dirname(input_filter_pattern)
if len(pattern_dirname) > 0:
if input_rel_folder_path != pattern_dirname:
# Strip out the base of the filter name
vs_filter = input_filter_name + '/' + input_rel_folder_path.replace(pattern_dirname, '')
else:
vs_filter = input_filter_name
else:
vs_filter = input_filter_name + '/' + input_rel_folder_path
return vs_filter
def _process_glob_entry(glob_content, filter_name, current_uber_dict):
"""
Process a glob content from the input file list
"""
if 'pattern' not in glob_content:
            raise Errors.WafError('Missing keyword "pattern" from the glob entry')
original_pattern = glob_content.pop('pattern').replace('\\', '/')
if original_pattern.startswith('@'):
ALIAS_PATTERN = re.compile('@.*@')
alias_match = ALIAS_PATTERN.search(original_pattern)
if alias_match:
alias = alias_match.group(0)[1:-1]
pattern = original_pattern[len(alias)+2:]
if alias=='ENGINE':
search_node = bld.path
else:
search_node = bld.root.make_node(bld.ThirdPartyPath(alias))
else:
pattern = original_pattern
search_node = waf_file_node.parent
else:
pattern = original_pattern
search_node = waf_file_node.parent
while pattern.startswith('../'):
pattern = pattern[3:]
search_node = search_node.parent
glob_results = search_node.ant_glob(pattern, **glob_content)
for globbed_file in glob_results:
rel_path = globbed_file.path_from(waf_file_node.parent).replace('\\', '/')
abs_path = globbed_file.abspath().replace('\\', '/')
rel_folder_path = os.path.dirname(rel_path)
vs_filter = _determine_vs_filter(rel_folder_path, filter_name, original_pattern)
if vs_filter not in current_uber_dict:
current_uber_dict[vs_filter] = []
if abs_path in dup_set:
Logs.warn("[WARN] File '{}' specified by the pattern '{}' in waf file '{}' is a duplicate. It will be ignored"
.format(abs_path, original_pattern, waf_file_node_abs))
else:
current_uber_dict[vs_filter].append(rel_path)
dup_set.add(abs_path)
def _clear_empty_uber_dict(current_uber_dict):
"""
        Perform housekeeping in case glob pattern overrides move all files out of a 'root' group.
"""
empty_filters = []
for filter_name, filter_contents in current_uber_dict.items():
if len(filter_contents)==0:
empty_filters.append(filter_name)
for empty_filter in empty_filters:
current_uber_dict.pop(empty_filter)
return current_uber_dict
def _process_uber_dict(uber_section, uber_dict):
"""
Process each uber dictionary value
"""
processed_uber_dict = {}
for filter_name, filter_contents in uber_dict.items():
for filter_content in filter_contents:
if isinstance(filter_content, str):
if '*' in filter_content or '?' in filter_content:
# If this is a raw glob pattern, stuff it into the expected glob dictionary
_process_glob_entry(dict(pattern=filter_content), filter_name, processed_uber_dict)
elif filter_content.startswith('@ENGINE@'):
file_path = os.path.normpath(filter_content.replace('@ENGINE@', bld.engine_path))
if not os.path.exists(file_path):
Logs.warn("[WARN] File '{}' specified in '{}' does not exist. It will be ignored"
.format(file_path, waf_file_node_abs))
else:
if filter_name not in processed_uber_dict:
processed_uber_dict[filter_name] = []
processed_uber_dict[filter_name].append(filter_content)
dup_set.add(file_path)
else:
# This is a straight up file reference.
# Do any processing on an aliased reference
if filter_content.startswith('@'):
processed_path = bld.PreprocessFilePath(filter_content, _invalid_alias_callback,
_alias_not_enabled_callback)
else:
processed_path = os.path.normpath(os.path.join(base_path_abs, filter_content))
if not os.path.exists(processed_path):
Logs.warn("[WARN] File '{}' specified in '{}' does not exist. It will be ignored"
.format(processed_path, waf_file_node_abs))
elif not os.path.isfile(processed_path):
Logs.warn("[WARN] Path '{}' specified in '{}' is a folder, only files or glob patterns are "
"allowed. It will be ignored"
.format(processed_path, waf_file_node_abs))
elif processed_path in dup_set:
Logs.warn("[WARN] File '{}' specified in '{}' is a duplicate. It will be ignored"
.format(processed_path, waf_file_node_abs))
else:
if filter_name not in processed_uber_dict:
processed_uber_dict[filter_name] = []
processed_uber_dict[filter_name].append(processed_path)
dup_set.add(processed_path)
elif isinstance(filter_content, dict):
                    # Dictionaries automatically go through the glob pattern processing
_process_glob_entry(filter_content, filter_name, processed_uber_dict)
else:
raise Errors.WafError("Invalid entry '{}' in file '{}', section '{}/{}'"
.format(filter_content, file, uber_section, filter_name))
return _clear_empty_uber_dict(processed_uber_dict)
def _get_cached_file_list():
"""
Calculate the location of the cached waf_files path
"""
bintemp_path = os.path.join(bld.srcnode.abspath(), BINTEMP_FOLDER)
src_relative_path = file_node.path_from(bld.srcnode)
cached_waf_files_abs_path = os.path.join(bintemp_path, src_relative_path)
return cached_waf_files_abs_path
file_node = bld.path.make_node(file)
# Read the source waf_file list
source_file_list = bld.parse_json_file(file_node)
# Prepare a processed waf_file list
processed_file_list = {}
for uber_file_entry, uber_file_dict in source_file_list.items():
processed_file_list[uber_file_entry] = _process_uber_dict(uber_file_entry, uber_file_dict)
return processed_file_list | 2,575 |
def landing():
"""Landing page"""
return render_template('public/index.html') | 2,576 |
def uniform_decay(distance_array, scale):
"""
Transform a measurement array using a uniform distribution.
The output is 1 below the scale parameter and 0 above it.
    Some sample values. Measurements are in multiples of ``scale``; decay values are fractions of
the maximum value:
+---------------+---------------+
| measurement | decay value |
+===============+===============+
| 0.0 | 1.0 |
+---------------+---------------+
| 0.25 | 1.0 |
+---------------+---------------+
| 0.5 | 1.0 |
+---------------+---------------+
| 0.75 | 1.0 |
+---------------+---------------+
| 1.0 | 1.0 |
+---------------+---------------+
"""
return (distance_array <= scale).astype(np.float64) | 2,577 |
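A short worked example added here (not from the source) that reproduces the decay values tabulated above:

import numpy as np

scale = 2.0
# measurements at 0, 0.25, 0.5, 0.75, 1.0 and 1.5 multiples of the scale
distances = np.array([0.0, 0.25, 0.5, 0.75, 1.0, 1.5]) * scale
print(uniform_decay(distances, scale))
# [1. 1. 1. 1. 1. 0.] -> 1.0 up to and including the scale, 0.0 beyond it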
def pid_to_service(pid):
"""
Check if a PID belongs to a systemd service and return its name.
Return None if the PID does not belong to a service.
Uses DBUS if available.
"""
if dbus:
return _pid_to_service_dbus(pid)
else:
return _pid_to_service_systemctl(pid) | 2,578 |
def Frequencies(bands, src):
"""
Count the number of scalars in each band.
:param: bands - the bands.
:param: src - the vtkPolyData source.
:return: The frequencies of the scalars in each band.
"""
freq = dict()
for i in range(len(bands)):
        freq[i] = 0
tuples = src.GetPointData().GetScalars().GetNumberOfTuples()
for i in range(tuples):
x = src.GetPointData().GetScalars().GetTuple1(i)
for j in range(len(bands)):
if x <= bands[j][2]:
freq[j] = freq[j] + 1
break
return freq | 2,579 |
def _load_use_static_shape(ortmodule_config_accessor, data):
"""Loads UseStaticShape from json file onto ORTModule."""
assert hasattr(data, _load_use_static_shape.loading_key)
log.info(f"Found keyword {_load_use_static_shape.loading_key} in json. Loading attributes from file.")
assert isinstance(data.UseStaticShape, bool), f"{_load_use_static_shape.loading_key} must be a boolean"
ortmodule_config_accessor._use_static_shape = data.UseStaticShape | 2,580 |
def label_file(input_file, output_file):
"""
label each feature file
"""
# read input file and save them in dict
features = load_protobuf(input_file)
# for each obstacle ID, sort dict by their timestamp
fea_trajs = build_trajectory(features)
# for each obstacle ID, label them, remove record cannot be labeled
    for fea_key, fea_traj in fea_trajs.items():
        fea_traj = TrajectoryToSample.clean(fea_traj)
        fea_traj = TrajectoryToSample.label(fea_traj)
        # rebuild the list instead of deleting while iterating, which would skip
        # the element following each deletion
        fea_trajs[fea_key] = [
            fea for fea in fea_traj
            if fea.HasField('label_update_time_delta')
            and fea.label_update_time_delta >= parameters['feature']['threshold_label_time_delta']
        ]
# save them in the output file with the same format as the input file
save_protobuf(output_file, fea_trajs.values()) | 2,581 |
def get_loss_fn(loss: str) -> Callable[..., torch.Tensor]:
"""
Get loss function as a PyTorch functional loss based on the name of the loss function.
Choices include 'cross_entropy', 'nll_loss', and 'kl_div'.
Args:
loss: a string indicating the loss function to return.
"""
loss_fn_mapping: Dict[str, Callable[..., torch.Tensor]] = {
'cross_entropy': F.cross_entropy,
'nll_loss': F.nll_loss,
'kl_div': F.kl_div,
}
try:
loss_fn: Callable[..., torch.Tensor] = loss_fn_mapping[loss]
except KeyError:
raise ValueError(f'Loss function {loss} is not supported.')
return loss_fn | 2,582 |
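A brief usage sketch added for illustration; the tensor shapes are arbitrary assumptions:

import torch

loss_fn = get_loss_fn('cross_entropy')   # resolves to F.cross_entropy
logits = torch.randn(8, 5)               # batch of 8 samples, 5 classes
targets = torch.randint(0, 5, (8,))
loss = loss_fn(logits, targets)          # scalar tensor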
def write_to_pubsub(tw):
"""
Publish to the given pubsub topic.
"""
    messages = [{"data": json.loads(line)} for line in tw]
    body = json.dumps({"messages": messages}, ensure_ascii=False)
publisher = pubsub_v1.PublisherClient()
topic_path = publisher.topic_path("ccc-2020-289323", "tweets")
future = publisher.publish(topic_path, data=body.encode("utf8"))
future.result() | 2,583 |
def tabular_generator(engines):
"""Generator that produces rows for tabular formats (CSV) from the dict
generated by export_engines.
"""
cols = [
'name',
'makerrace',
# 'description', # every value is "No information available"
'size',
'hull',
'hull_integrated',
'hull_threshold',
'thrust_forward',
'thrust_reverse',
'thrust_strafe',
'thrust_pitch',
'thrust_yaw',
'thrust_roll',
'boost_thrust',
'boost_duration',
'boost_attack',
'boost_release',
'travel_thrust',
'travel_charge',
'travel_attack',
'travel_release',
'angular_pitch',
'angular_roll',
]
# output header
yield ['id'] + cols
# output data
for engine_id in sorted(engines.keys()):
engine = engines[engine_id]
yield [engine_id] + [engine.get(col) for col in cols] | 2,584 |
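A hedged usage sketch (the engines dict is assumed to be the output of the export_engines helper mentioned in the docstring, which is not shown here) writing the generated rows to CSV:

import csv

with open("engines.csv", "w", newline="", encoding="utf-8") as fh:
    writer = csv.writer(fh)
    # the first yielded row is the header, then one row per engine id
    writer.writerows(tabular_generator(engines))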
def FormatAddress(chainIDAlias: str, hrp: str, addr: bytes) -> str:
"""FormatAddress takes in a chain prefix, HRP, and byte slice to produce a string for an address."""
addr_str = FormatBech32(hrp, addr)
return f"{chainIDAlias}{addressSep}{addr_str}" | 2,585 |
async def get_journal_scopes(
db_session: Session, user_id: str, user_group_id_list: List[str], journal_id: UUID
) -> List[JournalPermissions]:
"""
Returns list of all permissions (group user belongs to and user) for provided user and journal.
"""
journal_spec = JournalSpec(id=journal_id)
await find_journal(db_session, journal_spec)
if journal_id is None:
raise JournalNotFound(
"In order to get journal permissions, journal_id must be specified"
)
query = db_session.query(JournalPermissions).filter(
JournalPermissions.journal_id == journal_id
)
if user_id is None and user_group_id_list is None:
raise InvalidParameters(
"In order to get journal permissions, at least one of user_id, or user_group_id_list must be specified"
)
query = query.filter(
or_(
JournalPermissions.holder_id == user_id,
JournalPermissions.holder_id.in_(user_group_id_list),
)
)
journal_permissions = query.all()
if not journal_permissions:
raise PermissionsNotFound(f"No permissions for journal_id={journal_id}")
return journal_permissions | 2,586 |
def clean_script_title(script_title):
"""Cleans up a TV/movie title to save it as a file name.
"""
clean_title = re.sub(r'\s+', ' ', script_title).strip()
clean_title = clean_title.replace('\\', BACKSLASH)
clean_title = clean_title.replace('/', SLASH)
clean_title = clean_title.replace(':', COLON)
clean_title = clean_title.replace('*', STAR)
clean_title = clean_title.replace('<', LESS_THAN)
clean_title = clean_title.replace('>', GREATER_THAN)
clean_title = clean_title.replace('?', QUESTION_MARK)
clean_title = clean_title.replace('|', PIPE)
return clean_title | 2,587 |
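An illustrative call added here; the module-level replacement constants (BACKSLASH, SLASH, COLON, ...) are defined elsewhere in the original source, so the exact output depends on their values:

# whitespace is collapsed and characters that are illegal in file names are
# substituted with the module-level replacement tokens
safe_name = clean_script_title("Star Trek:  The Next Generation / Season 1")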
def runCmd(cmd, timeout=42, sh=False, env=None, retry=0):
"""
Execute an external command, read the output and return it.
@param cmd (str|list of str): command to be executed
@param timeout (int): timeout in sec, after which the command is forcefully terminated
@param sh (bool): True if the command is to be run in a shell and False if directly
@param env (dict): environment variables for the new process (instead of inheriting from the current process)
@param retry (int): number of retries on command timeout
@return: (stdout, stderr, rc) (str, str, int): the output of the command
"""
trace = ""
logger = get_logger()
if isinstance(cmd, str):
log_cmd = cmd
else:
log_cmd = ' '.join(cmd)
if log_cmd.startswith("/usr/lpp/mmfs/bin/mmccr fget"): # drop temp file name
log_cmd = ' '.join(log_cmd.split()[:-1])
t_start = time.time()
try:
if env is not None:
fullenv = dict(os.environ)
fullenv.update(env)
env = fullenv
# create the subprocess, ensuring a new process group is spawned
# so we can later kill the process and all its child processes
proc = subprocess.Popen(cmd, shell=sh,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
close_fds=False, env=env)
timer = threading.Timer(timeout, _stop_process, [proc, logger, log_cmd, timeout])
timer.start()
(sout, serr) = proc.communicate()
timer.cancel() # stop the timer when we got data from process
ret = proc.poll()
except OSError as e:
logger.debug(str(e))
sout = ""
serr = str(e)
ret = 127 if "No such file" in serr else 255
finally:
try:
proc.stdout.close()
proc.stderr.close()
except: #pylint: disable=bare-except
pass
t_run = time.time() - t_start
cmd_timeout = ret in (-signal.SIGTERM, -signal.SIGKILL) # 143,137
if ret == -6 and retry >= 0 : # special handling for sigAbrt
logger.warning("retry abrt %s with subprocess %s", cmd, s32)
(sout, serr, ret) = runCmd(cmd, timeout, sh, env, -1)
if cmd_timeout and retry > 0:
retry -= 1
logger.warning("Retry command %s counter: %s", cmd, retry)
(sout, serr, ret) = runCmd(cmd, timeout, sh, env, retry)
elif cmd_timeout:
serr = CMD_TIMEDOUT
logger.warning("runCMD: %s Timeout:%d ret:%s", cmd, timeout, ret)
elif trace:
logger.debug("runCMD: %s :(%d) ret:%s \n%s \n%s", cmd, timeout, ret, serr, sout)
return (sout, serr, ret) | 2,588 |
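A minimal usage sketch added for illustration; the command and timeout are arbitrary:

sout, serr, rc = runCmd(["/bin/ls", "-l", "/tmp"], timeout=10)
if rc != 0:
    get_logger().warning("command failed (rc=%s): %s", rc, serr)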
def _calc_cumsum_matrix_jit(X, w_list, p_ar, open_begin):
"""Fast implementation by numba.jit."""
len_x, len_y = X.shape
# cumsum matrix
D = np.ones((len_x, len_y), dtype=np.float64) * np.inf
if open_begin:
X = np.vstack((np.zeros((1, X.shape[1])), X))
D = np.vstack((np.zeros((1, D.shape[1])), D))
w_list[:, 0] += 1
# number of patterns
num_pattern = p_ar.shape[0]
# max pattern length
max_pattern_len = p_ar.shape[1]
# pattern cost
pattern_cost = np.zeros(num_pattern, dtype=np.float64)
# step cost
step_cost = np.zeros(max_pattern_len, dtype=np.float64)
# number of cells
num_cells = w_list.shape[0]
for cell_idx in range(num_cells):
i = w_list[cell_idx, 0]
j = w_list[cell_idx, 1]
if i == j == 0:
D[i, j] = X[0, 0]
continue
for pidx in range(num_pattern):
# calculate local cost for each pattern
for sidx in range(1, max_pattern_len):
# calculate step cost of pair-wise cost matrix
pattern_index = p_ar[pidx, sidx, 0:2]
ii = int(i + pattern_index[0])
jj = int(j + pattern_index[1])
if ii < 0 or jj < 0:
step_cost[sidx] = np.inf
continue
else:
step_cost[sidx] = X[ii, jj] \
* p_ar[pidx, sidx, 2]
pattern_index = p_ar[pidx, 0, 0:2]
ii = int(i + pattern_index[0])
jj = int(j + pattern_index[1])
if ii < 0 or jj < 0:
pattern_cost[pidx] = np.inf
continue
pattern_cost[pidx] = D[ii, jj] \
+ step_cost.sum()
min_cost = pattern_cost.min()
if min_cost != np.inf:
D[i, j] = min_cost
return D | 2,589 |
def test_rmse_particlefilter(pf_output, regression_problem):
"""Assert that the RMSE of the mode of the posterior of the PF is a lot smaller than
the RMSE of the data."""
true_states = regression_problem.solution
mode = pf_output.states.mode
rmse_mode = np.linalg.norm(np.sin(mode) - np.sin(true_states)) / np.sqrt(
true_states.size
)
rmse_data = np.linalg.norm(
regression_problem.observations - np.sin(true_states)
) / np.sqrt(true_states.size)
# RMSE of PF.mode strictly better than RMSE of data
assert rmse_mode < 0.99 * rmse_data | 2,590 |
def plot_jvm_graph(x_series, y_series, crashes, title, filename):
"""Creates a plot based on the x & y data passed in
Creates a plot with one x_series of data and multiple y_series' of data.
y_series should be a dictionary containing a key for each plot of data.
All of the plots need to have the same length. Each data set is on its
own graph, graphs are top down for easy comparison.
"""
figsize = 3 * len(y_series)
plt.rcParams["figure.figsize"] = [figsize, figsize]
plt.title("Java Memory Usage Over Time")
plt.xlabel("Iteration")
    for i, (label, series) in enumerate(y_series.items()):
plt.subplot(len(y_series), 1, (i + 1))
plt.plot(x_series, series)
plt.ylabel(label)
# plot crash lines
for crash in crashes:
plt.axvline(crash[0], color='r')
plt.savefig(filename)
plt.close() | 2,591 |
async def test_set_up_local(hass, aioclient_mock):
"""Test we do not set up Almond to connect to HA if we use Hass.io."""
entry = MockConfigEntry(
domain="almond",
data={"type": const.TYPE_LOCAL, "host": "http://localhost:9999"},
)
entry.add_to_hass(hass)
with patch(
"pyalmond.WebAlmondAPI.async_create_device", return_value=mock_coro()
) as mock_create_device:
assert await async_setup_component(hass, "almond", {})
assert entry.state == config_entries.ENTRY_STATE_LOADED
assert len(mock_create_device.mock_calls) == 1 | 2,592 |
def schedules_list(format_):
"""List request schedules in project."""
project_name = get_current_project(error=True)
client = init_client()
response = client.request_schedules_list(project_name=project_name)
client.api_client.close()
print_list(response, LIST_ITEMS, rename_cols=RENAME_COLUMNS, sorting_col=1, fmt=format_) | 2,593 |
def create_field_texture_coordinates(fieldmodule: Fieldmodule, name="texture coordinates", components_count=3,
managed=False) -> FieldFiniteElement:
"""
Create texture coordinates finite element field of supplied name with
number of components 1, 2, or 3 and the components named "u", "v" and "w" if used.
New field is not managed by default.
"""
return create_field_finite_element(fieldmodule, name, components_count,
component_names=("u", "v", "w"), managed=managed, type_coordinate=True) | 2,594 |
def get_cert_sha1_by_openssl(certraw: str) -> str:
"""calc the sha1 of a certificate, return openssl result str"""
res: str = None
tmpname = None
try:
tmpname = tmppath / f"{uuid.uuid1()}.crt"
while tmpname.exists():
tmpname = tmppath / f"{uuid.uuid1()}.crt"
tmpname.write_text(certraw, encoding="utf-8")
cmd = f"openssl x509 -in {tmpname} -fingerprint -noout -sha1"
res = exec_openssl(cmd)
except Exception as ex:
raise Exception(f"Parse ssl data error, err:{ex}")
finally:
if tmpname is not None:
tmpname.unlink()
return res | 2,595 |
def remove(package_name):
"""Removes a holodeck package.
Args:
package_name (str): the name of the package to remove
"""
if package_name not in packages:
raise HolodeckException("Unknown package name " + package_name)
for config, path in _iter_packages():
if config["name"] == package_name:
shutil.rmtree(path) | 2,596 |
def get_files(root_path, extension='*.*'):
"""
    - root_path: root path from which the search is performed
    - extension: file extension used to filter the results
    - returns: all files found recursively under the root path
"""
return [y for x in os.walk(root_path) for y in glob(os.path.join(x[0], extension))] | 2,597 |
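A short usage example added for illustration, collecting every Python file under the current directory:

# recursive search; the extension argument follows glob syntax, e.g. '*.py'
for path in get_files('.', '*.py'):
    print(path)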
def get_dderivative_skewness(uni_ts: Union[pd.Series, np.ndarray], step_size: int = 1) -> np.float64:
"""
:return: The skewness of the difference derivative of univariate time series within the
function we use step_size to find derivative (default value of step_size is 1).
"""
return get_skewness(_difference_derivative(uni_ts, step_size)) | 2,598 |
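A hedged usage sketch (the _difference_derivative and get_skewness helpers referenced above live elsewhere in the original module; the series here is synthetic):

import numpy as np
import pandas as pd

ts = pd.Series(np.sin(np.linspace(0.0, 20.0, 500)))
skew = get_dderivative_skewness(ts, step_size=2)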
def extract_query(e: Event, f, woi, data):
"""
    Create a query event extended by the window of interest around the given event.
    :param e: the source event
    :param f: sampling rate used to convert window times to sample indices
    :param woi: window of interest as (time before, time after) the event
    :param data: the signal data to slice
"""
assert woi[0] > 0 and woi[1] > 0
e_start_index = resolve_esi(e, data)
st = int(e_start_index - woi[0] * f)
ed = int(e_start_index + woi[0] * f)
return Event(e.name, e.startT - woi[0], e.endT + woi[1], data[st:ed]), st, ed | 2,599 |