content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M)
---|---
def get_possible_zeros(coefficients: list) -> list:
"""Rational Zeros Theorem possible zeros of a polynomial function.
Args:
coefficients (list): The coefficients of a polynomial function,
in order of degree including all from a_n to a_0.
Returns:
list: A list containing all possible zeros, negative and positive.
"""
possible_zeros = []
# Obtain factors of a_n and a_0
factors_an = get_factors(coefficients[0])
factors_a0 = get_factors(coefficients[-1])
# Generate all possible zeros, skipping duplicates.
for i in factors_a0:
possible_zeros.extend([i, -i])
for j in factors_an:
frac = Fraction(i, j)
if frac not in possible_zeros:
possible_zeros.extend([frac, -frac])
# Sort the possible zeros in absolute value order.
possible_zeros.sort(key=abs)
return possible_zeros | 2,400 |
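# A minimal usage sketch for get_possible_zeros. It assumes Fraction comes from the
# standard fractions module; get_factors below is a hypothetical stand-in (not the
# original helper) that returns the positive divisors of its argument.
from fractions import Fraction

def get_factors(n: int) -> list:
    """Return the positive divisors of n (hypothetical helper)."""
    n = abs(n)
    return [d for d in range(1, n + 1) if n % d == 0]

# For p(x) = 2x^3 - 3x^2 - 8x - 3 the candidates are +-1, +-3, +-1/2, +-3/2.
print(get_possible_zeros([2, -3, -8, -3]))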
def read_aa_traj(ns):
"""Read atomistic trajectory
ns creates:
aa_universe
"""
print("Reading All Atom (AA) trajectory")
ns.aa_universe = mda.Universe(ns.aa_tpr_filename, ns.aa_traj_filename,
in_memory=True, refresh_offsets=True,
guess_bonds=False) # setting guess_bonds=False disables angles, dihedrals and improper_dihedrals guessing, which is activated by default in some MDA versions
print(" Found", len(ns.aa_universe.trajectory), "frames") | 2,401 |
def verify_province():
"""
Verify if user's province is British Columbia.
:precondition: User's input must be a string
:postcondition: Correctly verify if user's province is British Columbia
:raise ValueError if the user enters an empty string when prompted for input
"""
try:
# Ask user for their Province or Territory
user_province = province_selector()
# If the user enters an empty string, raise a ValueError
if user_province == "":
raise ValueError
# Catch the ValueError that may be raised in the try block
except ValueError:
print("A province or territory name cannot be blank, please try again")
else:
# If the user entered British Columbia in some capacity, open a link to the emergency funding on gov.bc.ca
if user_province == "BRITISH COLUMBIA" or user_province == "BC":
print("Because you are a post-secondary student, BC's government is offering you emergency support. "
"A link has been opened in your browser for your educational viewing.")
time.sleep(2) # Wait two seconds before opening the link to provide time for user to digest the message
open_link("https://news.gov.bc.ca/releases/2020AEST0018-000615")
else:
return | 2,402 |
def view() -> pn.Column:
"""# Bootstrap Dashboard Page.
Creates a Bootstrap Dashboard Page with a Chart and a Table
- inspired by the [GetBootstrap Dashboard Template]
(https://getbootstrap.com/docs/4.4/examples/dashboard/)
- implemented using the `awesome_panel` Python package and in particular the
`awesome_panel.express.templates.BootstrapDashboardTemplate`
Returns:
pn.Column -- The Orders View
"""
table = pn.Pane(
_get_table_data(),
sizing_mode="stretch_width",
)
pn.config.sizing_mode = "stretch_width"
main = [
APPLICATION.intro_section(),
pn.Column(
pnx.SubHeader("Dashboard"),
pn.pane.HoloViews(_holoviews_chart()),
),
pn.Column(
pnx.SubHeader("Section Title"),
table,
),
]
return site.create_template(title="Bootstrap Dashboard", main=main, main_max_width="800px") | 2,403 |
def load_tests(loader, tests, ignore):
"""
Creates a ``DocTestSuite`` for each module named in ``DOCTEST_MODULES``
and adds it to the test run.
"""
for module in DOCTEST_MODULES:
tests.addTests(doctest.DocTestSuite(module))
return tests | 2,404 |
def main(departure, arrival, next, transport, verbose):
""" Search trips with Trainline and returns it in csv """
# Get current datetime > from_date
from_date_obj = datetime.now()
# Decode duration (ex : 1day => timedelta(days=1))
delta = _decode_next_param(next)
# Calculate the end date > to_date
to_date_obj = from_date_obj + delta
# Convert the datetime objects to strings
from_date = from_date_obj.strftime("%d/%m/%Y %H:%M")
to_date = to_date_obj.strftime("%d/%m/%Y %H:%M")
if transport == "any":
transport = None
if verbose:
print()
print("Search trips from {} to {}, between {} and {}\n".format(
departure, arrival, from_date, to_date))
results = trainline.search(
departure_station=departure,
arrival_station=arrival,
from_date=from_date,
to_date=to_date,
transportation_mean=transport)
print(results.csv())
if verbose:
print()
print("{} results".format(len(results))) | 2,405 |
def run_all(names, args):
"""
Function is called by main program
"""
print(BANNER)
valid_accounts = check_storage_accounts(names, args.threads,
args.nameserver)
if valid_accounts and not args.quickscan:
brute_force_containers(valid_accounts, args.brute, args.threads)
check_azure_websites(names, args.nameserver, args.threads)
check_azure_databases(names, args.nameserver, args.threads)
check_azure_vms(names, args.nameserver, args.threads) | 2,406 |
def buildGeneMap(identifiers, separator="|"):
"""build map of predictions to genes.
Use an identifier syntax of species|transcript|gene. If none is
given, all transcripts are assumed to be from their own gene.
"""
map_id2gene, map_gene2ids = {}, {}
for id in identifiers:
f = id.split(separator)
if len(f) < 3:
gene = id
else:
gene = f[0] + separator + f[2]
map_id2gene[id] = gene
if gene not in map_gene2ids:
map_gene2ids[gene] = []
map_gene2ids[gene].append(id)
return map_id2gene, map_gene2ids | 2,407 |
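# Quick usage sketch for buildGeneMap: identifiers that follow the
# species|transcript|gene syntax are grouped per gene; anything else maps to itself.
ids = ["hs|tr1|geneA", "hs|tr2|geneA", "orphan_transcript"]
id2gene, gene2ids = buildGeneMap(ids)
# id2gene == {"hs|tr1|geneA": "hs|geneA", "hs|tr2|geneA": "hs|geneA",
#             "orphan_transcript": "orphan_transcript"}
# gene2ids == {"hs|geneA": ["hs|tr1|geneA", "hs|tr2|geneA"],
#              "orphan_transcript": ["orphan_transcript"]}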
def flexible_set_value(object, value, attr=True, null_others=True):
"""Given an object, set either value_int, value_str, or value_float as appropriate.
:param attr: if True, the attribute is set. If False, the dictionary value is set instead.
:param null_others: if True, the remaining values are set to None. If False, they are ignored.
"""
all_names = ['value_float', 'value_int', 'value_str']
if isinstance(value, float):
assigned_name = 'value_float'
elif isinstance(value, int):
assigned_name = 'value_int'
elif isinstance(value, str):
assigned_name = 'value_str'
else:
raise TypeError("Unable to assign this value to any of "+str(all_names))
if attr:
set_function = object.__setattr__
else:
set_function = object.__setitem__
set_function(assigned_name, value)
if null_others:
for other_name in all_names:
if other_name!=assigned_name:
set_function(other_name, None) | 2,408 |
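# Usage sketch for flexible_set_value with a plain dict (attr=False); the value_*
# keys shown here mirror the names the function assigns.
record = {'value_float': None, 'value_int': 7, 'value_str': None}
flexible_set_value(record, 3.14, attr=False)
# record == {'value_float': 3.14, 'value_int': None, 'value_str': None}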
def get_kline(symbol, end_date, freq, start_date=None, count=None):
"""获取K线数据
:param symbol: str
聚宽标的代码
:param start_date: datetime
截止日期
:param end_date: datetime
截止日期
:param freq: str
K线级别,可选值 ['1min', '5min', '30min', '60min', 'D', 'W', 'M']
:param count: int
K线数量,最大值为 5000
:return: pd.DataFrame
>>> start_date = datetime.strptime("20200701", "%Y%m%d")
>>> end_date = datetime.strptime("20200719", "%Y%m%d")
>>> df1 = get_kline(symbol="000001.XSHG", start_date=start_date, end_date=end_date, freq="1min")
>>> df2 = get_kline(symbol="000001.XSHG", end_date=end_date, freq="1min", count=1000)
>>> df3 = get_kline(symbol="000001.XSHG", start_date='20200701', end_date='20200719', freq="1min")
>>> df4 = get_kline(symbol="000001.XSHG", end_date='20200719', freq="1min", count=1000)
"""
if count and count > 5000:
warnings.warn(f"count={count}, 超过5000的最大值限制,仅返回最后5000条记录")
# 1m, 5m, 15m, 30m, 60m, 120m, 1d, 1w, 1M
freq_convert = {"1min": "1m", "5min": '5m', '15min': '15m',
"30min": "30m", "60min": '60m', "D": "1d", "W": '1w', "M": "1M"}
end_date = pd.to_datetime(end_date)
if start_date:
start_date = pd.to_datetime(start_date)
data = {
"method": "get_price_period",
"token": get_token(),
"code": symbol,
"unit": freq_convert[freq],
"date": start_date.strftime("%Y-%m-%d"),
"end_date": end_date.strftime("%Y-%m-%d"),
# "fq_ref_date": end_date
}
elif count:
data = {
"method": "get_price",
"token": get_token(),
"code": symbol,
"count": count,
"unit": freq_convert[freq],
"end_date": end_date.strftime("%Y-%m-%d"),
# "fq_ref_date": end_date
}
else:
raise ValueError("start_date 和 count 不能同时为空")
r = requests.post(url, data=json.dumps(data))
df = text2df(r.text)
df['symbol'] = symbol
df.rename({'date': 'dt', 'volume': 'vol'}, axis=1, inplace=True)
df = df[['symbol', 'dt', 'open', 'close', 'high', 'low', 'vol']]
for col in ['open', 'close', 'high', 'low', 'vol']:
df.loc[:, col] = df[col].apply(lambda x: round(float(x), 2))
df.loc[:, "dt"] = pd.to_datetime(df['dt'])
return df | 2,409 |
def create_trial_instance(
trial_def: Type[det.Trial],
checkpoint_dir: str,
config: Optional[Dict[str, Any]] = None,
hparams: Optional[Dict[str, Any]] = None,
) -> det.Trial:
"""
Create a trial instance from a Trial class definition. This can be a useful
utility for debugging your trial logic in any development environment.
Arguments:
trial_def: A class definition that inherits from the det.Trial interface.
checkpoint_dir:
The checkpoint directory that the trial will use for loading and
saving checkpoints.
config:
An optional experiment configuration that is used to initialize the
:class:`determined.TrialContext`. If not specified, a minimal default
is used.
"""
determined_common.set_logger(
util.debug_mode() or det.ExperimentConfig(config or {}).debug_enabled()
)
env, rendezvous_info, hvd_config = det._make_local_execution_env(False, config, hparams)
trial_context = trial_def.trial_context_class(env, hvd_config)
return trial_def(trial_context) | 2,410 |
def test_duplicate_submission(app, pg_driver, cgci_blgsp, submitter):
"""
Make sure that concurrent transactions don't cause duplicate submission.
"""
data = {
"type": "experiment",
"submitter_id": "BLGSP-71-06-00019",
"projects.id": "daa208a7-f57a-562c-a04a-7a7c77542c98"
}
# convert to TSV (save to file)
file_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"data/experiment_tmp.tsv"
)
with open(file_path, "w") as f:
dw = csv.DictWriter(f, sorted(data.keys()), delimiter="\t")
dw.writeheader()
dw.writerow(data)
# read the TSV data
data = None
with open(file_path, "r") as f:
data = f.read()
os.remove(file_path) # clean up (delete file)
assert data
program, project = BLGSP_PATH.split('/')[3:5]
tsv_data = TSVToJSONConverter().convert(data)[0]
doc_args = [None, 'tsv', data, tsv_data]
utx1, utx2 = [UploadTransaction(
program=program,
project=project,
role=ROLES['UPDATE'],
logger=app.logger,
flask_config=app.config,
index_client=app.index_client,
external_proxies=get_external_proxies(),
db_driver=pg_driver,
) for _ in range(2)]
response = ""
with pg_driver.session_scope(can_inherit=False) as s1:
with utx1:
utx1.parse_doc(*doc_args)
with pg_driver.session_scope(can_inherit=False) as s2:
with utx2:
utx2.parse_doc(*doc_args)
with pg_driver.session_scope(session=s2):
utx2.flush()
with pg_driver.session_scope(session=s2):
utx2.post_validate()
with pg_driver.session_scope(session=s2):
utx2.commit()
try:
with pg_driver.session_scope(session=s1):
utx1.flush()
except IntegrityError:
s1.rollback()
utx1.integrity_check()
response = utx1.json
assert response["entity_error_count"]==1
assert response["code"]==400
assert response['entities'][0]['errors'][0]['message'] == "experiment with {'project_id': 'CGCI-BLGSP', 'submitter_id': 'BLGSP-71-06-00019'} already exists in the DB"
with pg_driver.session_scope():
assert pg_driver.nodes(md.Experiment).count() == 1 | 2,411 |
def magnitude(number: SnailNumber) -> int:
"""Calculates the magnitude of asnail number
Args:
number (SnailNumber): input number
Returns:
(int): magnitude
Examples:
>>> magnitude([[1, 1], [2, 2]])
35
>>> magnitude([[[[0,7],4],[[7,8],[6,0]]],[8,1]])
1384
"""
if isinstance(number, int):
return number
return 3 * magnitude(number[0]) + 2 * magnitude(number[1]) | 2,412 |
def _load_model_from_tarball(tarball_path, gpg_home_dir):
"""Load a model from a tarball
Args:
tarball_path: a path to a model gzipped tar file
gpg_home_dir: home directory for gpg to verify signed model (e.g. path/to/.gnupg)
Returns:
something of type SerializableModel
"""
with tarfile.open(tarball_path, "r") as tar_file:
return model_metadata.load_from_tarfile(tar_file, gpg_home_dir=gpg_home_dir) | 2,413 |
def read_csv():
"""
Reads data from .csv file
"""
# reads data and stores it into dataframe
df = pd.read_csv('train_set', sep=',')
df_test = pd.read_csv('test_set', sep=',')
split_dataframe(df, df_test) | 2,414 |
def strip_begin_end_key(key) :
"""
Strips off newline chars, BEGIN PUBLIC KEY and END PUBLIC KEY.
"""
return key.replace("\n", "")\
.replace("-----BEGIN PUBLIC KEY-----", "").replace("-----END PUBLIC KEY-----", "") | 2,415 |
def add_expdvr(npoints: int, qmin: float, qmax: float) -> DVRSpecification:
"""Register a new exponential DVR
Args:
npoints (int): number of grid points
qmin (float): minimal x value
qmax (float): maximal x value
"""
return DVRSpecification("ExponentialDVR", npoints, qmin, qmax) | 2,416 |
def main():
"""Main CLI function.
:rtype: int
:returns: exit code, which is sum of all exit codes
"""
opts, args = parse_args()
setup_logging(opts.verbose)
if opts.show_configs:
print_configs()
return 0
build_env = os.environ.copy()
chpl_misc = get_chpl_misc(opts, args, build_env)
build_configs = get_configs(opts)
config_count_str = '{0} configuration{1}'.format(
len(build_configs),
's' if len(build_configs) > 1 else '')
logging.info('Building {0}.'.format(config_count_str))
def list_config_names():
"""Return a complete formatted table showing all the chapel configs in this build."""
names = []
for i, build_config in enumerate(build_configs):
if i == 0:
# prepend header row
build_config_name = build_config.pretty_str(header=True)
if not build_config_name:
build_config_name = 'None'
names.append('')
names.append(' ' + build_config_name)
# normal table row
build_config_name = build_config.pretty_str()
if not build_config_name:
build_config_name = 'None'
names.append('%3d / %3d %s' % (i+1, len(build_configs), build_config_name))
return names
logging.info('\n'.join(list_config_names()))
make_logfile = chpl_misc['make_logfile']
chpl_home = chpl_misc['chpl_home']
if make_logfile:
print('\n[BUILD_CONFIGS] CHPL_HOME={0}'.format(chpl_home), file=make_logfile)
print('\n[BUILD_CONFIGS] Building {0}'.format(config_count_str), file=make_logfile)
print('\n'.join(list_config_names()), file=make_logfile)
statuses = [0,]
with elapsed_time('All {0}'.format(config_count_str)):
for i, build_config in enumerate(build_configs):
result = build_chpl(
'{0} / {1}'.format(i+1, len(build_configs)),
build_config,
build_env,
chpl_misc,
parallel=opts.parallel,
verbose=opts.verbose,
dry_run=opts.dry_run,
)
statuses.append(result)
# exit from this program.
exit_code = max(statuses)
return exit_code | 2,417 |
def write_markdown(path, content):
""" Write gathered metadata in the markdown format """
to_print = ""
if content['setup']:
to_print += "# Setup\n" + content['setup'] + '\n\n'
if content['action'] or content['expected']:
to_print += "# Test\n\n"
if content['action']:
to_print += "## Step\n" + content['action'] + '\n\n'
if content['expected']:
to_print += "## Expect\n" + content['expected'] + '\n\n'
if content['cleanup']:
to_print += "# Cleanup\n" + content['cleanup'] + '\n'
try:
with open(path, 'w', encoding='utf-8') as md_file:
md_file.write(to_print)
echo(style(
f"Test case successfully stored into '{path}'.", fg='magenta'))
except IOError:
raise ConvertError(f"Unable to write '{path}'.") | 2,418 |
def _add_rule_pattern_constraints_to_table(decoder, table):
"""Adds an additional column to the given table, defining
additional constraints assumed by rule patterns in rows.
"""
global _trace
if _trace and _process_table(table):
print "*** processing table: %s ***" % table.name
constraint_col = len(table.columns())
table.add_column(dgen_core.BitField('$pattern', 31, 0))
for row in table.rows():
_add_rule_pattern_constraints_to_row(
decoder, table, row, constraint_col) | 2,419 |
def sync_now(r, **attr):
"""
Manual synchronization of a repository
@param r: the S3Request
@param attr: controller options for the request
"""
T = current.T
auth = current.auth
response = current.response
rheader = attr.get("rheader", None)
if rheader:
rheader = rheader(r)
output = {"title": T("Manual Synchronization"),
"rheader": rheader,
}
s3task = current.s3task
sync = current.sync
if not auth.s3_logged_in():
auth.permission.fail()
if r.interactive:
if r.http in ("GET", "POST"):
repository = r.record
if not repository:
r.error(404, current.ERROR.BAD_RECORD)
form = FORM(DIV(T("Click 'Start' to synchronize with this repository now:"),
),
DIV(INPUT(_class = "tiny primary button",
_type = "submit",
_value = T("Start"),
),
),
_class="sync-now-form",
)
if form.accepts(r.post_vars, current.session):
task_id = s3task.run_async("sync_synchronize",
args = [repository.id],
vars = {"user_id": auth.user.id,
"manual": True,
},
)
if task_id is False:
response.error = T("Could not initiate manual synchronization.")
elif task_id is None:
# No scheduler running, has run synchronously
response.flash = T("Manual synchronization completed.")
else:
sync.set_status(manual=True)
response.flash = T("Manual synchronization started in the background.")
else:
r.error(405, current.ERROR.BAD_METHOD)
else:
r.error(415, current.ERROR.BAD_FORMAT)
status = sync.get_status()
if status.running:
output["form"] = T("Synchronization currently active - refresh page to update status.")
elif not status.manual:
output["form"] = form
else:
output["form"] = T("Manual synchronization scheduled - refresh page to update status.")
response.view = "update.html"
return output | 2,420 |
def ensure_valid_positions(epochs):
"""Make sure the EEG channel positions are valid.
If channels are bipolar and referenced to M1 or M2, rename them to just the
first derivation so that autoreject can be used.
"""
ch_names = epochs.info['ch_names']
if all(['-' not in c for c in ch_names]): # Only monopolar channels
pass
elif all([c.endswith('-M1') or c.endswith('-M2') for c in ch_names]):
ch_mapping = {c: c.split('-')[0] for c in ch_names}
epochs.rename_channels(ch_mapping)
epochs.set_montage('standard_1020')
else:
raise ValueError('Bipolar channels are referenced to another channel '
'than M1 or M2.') | 2,421 |
def checkdnsrr():
"""Check DNS records corresponding to a given
Internet host name or IP address"""
raise NotImplementedError()
def test_get_sub_by_id(session):
"""ASTM Compliance Test: DSS0030_E_GET_SUB_BY_ID."""
resp = session.get('/subscriptions/{}'.format(SUB_ID))
assert resp.status_code == 200, resp.content
data = resp.json()
assert data['subscription']['id'] == SUB_ID
assert data['subscription']['notification_index'] == 0
assert data['subscription']['callbacks'] == {
'identification_service_area_url': 'https://example.com/foo'
} | 2,423 |
def get_horizontal_rainbow_00():
"""
Returns the main horizontal rainbow
Programs that use this function:
- Diagonal Ripple 1
- Diagonal Ripple 2
- Diagonal Ripple 3
- Diagonal Ripple 4
- Double Ripple 1
- Double Ripple 2
- Double Ripple 3
- Double Ripple 4
- Horizontal Ripple 1
- Horizontal Ripple 2
- Moving Horizontal Rainbow 1
- Moving Horizontal Rainbow 2
"""
rainbow00 = [
[R, O, Y, G, B, I, V, W],
[R, O, Y, G, B, I, V, W],
[R, O, Y, G, B, I, V, W],
[R, O, Y, G, B, I, V, W],
[R, O, Y, G, B, I, V, W],
[R, O, Y, G, B, I, V, W],
[R, O, Y, G, B, I, V, W],
[R, O, Y, G, B, I, V, W]
]
return rainbow00 | 2,424 |
def check_sweep(oDesign, setup_name, sweepname):
"""
Check that sweepname is in the given setup. If not, raise an exception.
Parameters
----------
oDesign : pywin32 COMObject
The HFSS oDesign object upon which to operate.
setup_name : str
Name of HFSS setup to use, for example "Setup1"
sweepname : str
Name of HFSS sweep to use, for example "LastAdaptive"
Returns
-------
None
"""
# Get all of the sweeps in the setup and test the name
sweeps = get_sweeps(oDesign, setup_name)
if sweepname not in sweeps:
raise Exception("SweepName not in the Setup.") | 2,425 |
def xml_attr_or_element(xml_node, name):
""" Attempt to get the value of name from the xml_node. This could be an attribute or
a child element.
"""
attr_val = xml_node.get(name, None)
if attr_val is not None:
return attr_val.encode('utf-8').strip()
for child in xml_node:  # getchildren() is deprecated; direct iteration works in ElementTree and lxml
if child.tag == name:
return child.text.encode('utf-8').strip()
return None | 2,426 |
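# Usage sketch for xml_attr_or_element, assuming a standard xml.etree.ElementTree
# node: the attribute value wins if present, otherwise the text of a matching child
# element is returned (both as UTF-8 bytes), or None if neither exists.
import xml.etree.ElementTree as ET

node = ET.fromstring('<item name="from-attr"><title> from-child </title></item>')
print(xml_attr_or_element(node, 'name'))     # b'from-attr'
print(xml_attr_or_element(node, 'title'))    # b'from-child'
print(xml_attr_or_element(node, 'missing'))  # None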
def kmeans_init_centroids(x_array, num_centroids_K):
"""
This function initializes K centroids that are to be used in K-means on the dataset x_array.
Parameters
----------
x_array : array_like
The dataset of size (m x n).
num_centroids_K : int
The number of clusters.
Returns
-------
rand_init_centroids : array_like
Centroids of the clusters. This is a matrix of size (K x n).
Instructions
------------
You should set centroids to randomly chosen examples from the dataset x_array.
"""
numpy.random.seed(seed=42)
num_examples, num_features = x_array.shape
rand_init_centroids = numpy.zeros((num_centroids_K, num_features))
randidx = numpy.random.permutation(num_examples)
# Take the first K examples as centroids
rand_init_centroids = x_array[randidx[:num_centroids_K], :]
return rand_init_centroids | 2,427 |
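# Usage sketch: pick K random rows of the data matrix as the initial centroids.
# The fixed seed inside the function makes the selection reproducible.
import numpy
x_array = numpy.arange(20, dtype=float).reshape(10, 2)  # 10 examples, 2 features
centroids = kmeans_init_centroids(x_array, num_centroids_K=3)
print(centroids.shape)  # (3, 2); three distinct rows drawn from x_array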
def get_coinbase_candle_url(url, timestamp_from, pagination_id):
"""Get Coinbase candle URL."""
start = timestamp_from.replace(tzinfo=None).isoformat()
url += f"&start={start}"
if pagination_id:
url += f"&end={pagination_id}"
return url | 2,428 |
def part2():
"""This view will be at the path ``/part2``"""
return "Part 2" | 2,429 |
def create_strings_from_file(filename, count):
"""
Create all strings by reading lines from the specified file
"""
strings = []
with open(filename, 'r') as f:
lines = [l.strip()[0:200] for l in f.readlines()]
if len(lines) == 0:
raise Exception("No lines could be read in file")
while len(strings) < count:
if len(lines) > count - len(strings):
strings.extend(lines[0:count - len(strings)])
else:
strings.extend(lines)
return strings | 2,430 |
def drive_around(a_th_gld=1.2):
"""
Function that implements the logic with which the robot decides how to navigate in 2D space. It is essentially based on the (frontal and lateral)
distance values of the golden tokens obtained by find_obstacles().
Args:
a_th_gld (float): threshold for the frontal golden token, default: 1.2
Local variables (returned by find_obstacles):
dist_front (float): distance of the closest golden token in the frontal portion of the plane
dist_left (float): distance of the closest golden token on the left
dist_right (float): distance of the closest golden token on the right
Inner Functions:
find_obstacles(range_front,range_lat) : see find_obstacles() function header
"""
def find_obstacles(range_front=30,range_lat=[80,100]):
"""
Function to find the distances of the closest golden tokens (i.e. the obstacles) in the frontal, left and right portions of the robot's view.
Args:
range_front (float): positive range in which we want to find the frontal token, default: 30 degrees
range_lat (int[]):list of the two positive angles (that correspond to the lateral areas) in which the robot will search for, default: [80,100] degrees
Returns:
dist_front (float): distance of the closest golden token on the front
dist_left (float): distance of the closest golden token on the left
dist_right (float): distance of the closest golden token on the right
"""
dist_left=dist_right=dist_front= 100
for token in R.see():
if(token.info.marker_type is MARKER_TOKEN_GOLD and token.dist < 2.5):
if token.dist < dist_front and -range_front < token.rot_y < +range_front:
dist_front=token.dist
if token.dist < dist_left and -range_lat[1] < token.rot_y < -range_lat[0] :
dist_left = token.dist
if token.dist < dist_right and range_lat[0] < token.rot_y < range_lat[1] :
dist_right = token.dist
return dist_front,dist_left,dist_right
dist_front,dist_left,dist_right=find_obstacles()
if(dist_front<a_th_gld): #check if the frontal distance is lower than a_th_gld
if(dist_left<=dist_right): #checks if the distance of the left golden token is lower than the one of the right token
if(1.5*dist_left<dist_right): #in this case the left distance (dist_left) is at least 1.5 times smaller than the right distance (dist_right), so I only need to turn to the right
turn(45,0.1)
print("right a bit...")
else: #the two lateral distances are too similar, better to go forward while turning
drive(20,0.1)
turn(20,0.1)
print("slightly turn to the right...")
elif(1.5*dist_right<dist_left): #if the cycle arrives here, it means that dist_right<dist_left
print("left a bit...")
turn(-45,0.1)
else:
drive(20,0.1)
turn(-35,0.1)
print("slightly turn to the left...")
else: #if none of the previous conditions occurred, then go forward
drive(80,0.15)
print("going forward...") | 2,431 |
def a_request(session_request):
"""AnonymousUser request"""
session_request.user = AnonymousUser()
return session_request | 2,432 |
def main():
"""
Connect to a generic device and then commandeer that connection
Generally this would be used for connecting to a terminal server, then doing *something* to
connect to one of its downstream devices (i.e. access terminal server port 123 or whatever).
Once on the console of the downstream device, we can "commandeer" this terminal server
connection object and basically transform it into a connection object of the type (i.e. IOSXE)
that we ultimately want.
"""
# firstly we create the "outer"/"parent" connection -- this would be the connection to the
# terminal server or similar device. generally this will be the `GenericDriver`, but there is
# nothing stopping you from using any other driver type!
term_server_conn = GenericDriver(**GENERIC_DEVICE)
# open this connection
term_server_conn.open()
# here you would normally need to add some logic to get connected to the terminal server port or
# to ssh from this "outer" device to the target device, in this example we're just ssh'ing to a
# device using the generic driver then transforming that to the IOSXE driver so we dont need to
# do anything
# next create, but dont open, a connection for the target device type
target_device_conn = Scrapli(**TARGET_DEVICE)
# we can then call `commandeer` from the "inner"/"child" connection, passing it the connection
# object of the "outer"/"parent"
target_device_conn.commandeer(conn=term_server_conn)
# we can confirm the "target_device_conn" is of course of type IOSXE (in this example), and that
# we see the send_config methods of the *network* driver (rather than the parent connection not
# having those config methods as it is *generic* driver type).
print(type(target_device_conn), dir(target_device_conn))
# we can acquire the config mode (which only the IOSXE driver would know how to do) to confirm
# things are working as expected
target_device_conn.acquire_priv("configuration")
print(target_device_conn.get_prompt())
target_device_conn.acquire_priv("privilege_exec")
print(target_device_conn.get_prompt())
# closing the "inner"/"child" connection will close the "outer"/"parent" connection, so if you
# wish to keep using the outer connection.... dont close this one :)
# target_device_conn.close()
# if you dont close the inner connection, you can keep using the outer connection to do whatever
# it is you need to do with it!
print(term_server_conn.get_prompt()) | 2,433 |
def subplot_index(nrow, ncol, k, kmin=1):
"""Return the i, j index for the k-th subplot."""
i = 1 + (k - kmin) // ncol
j = 1 + (k - kmin) % ncol
if i > nrow:
raise ValueError('k = %d exceeds number of rows' % k)
return i, j | 2,434 |
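# Usage sketch: map a linear subplot counter k onto its (row, column) position,
# counting across columns first, with 1-based indices.
for k in range(1, 7):
    print(k, subplot_index(nrow=2, ncol=3, k=k))
# 1 (1, 1)  2 (1, 2)  3 (1, 3)  4 (2, 1)  5 (2, 2)  6 (2, 3)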
def save():
"""
-- opens game_save.txt and overwrites the old file, writing each player's values to the document so
-- they can be read back when loading. (adds a score of -1 to each player to make sure that there
-- are no more than 10 elements in each score array; if the array is full, the pointless value is removed)
:return: [void]
"""
with open("game_save.txt", "w") as save_file:
for player in const.PLAYERS:
player.add_score(-1)
save_file.write(str(player)) | 2,435 |
def read_transport_maps(input_dir, ids=None, time=None):
"""
Find and parse all transport maps in a directory.
Returns a list containing the transport maps and start/end timepoints.
Parameters
----------
input_dir : str
The directory in which to look for transport maps.
Alternatively, a pattern may be given, resulting in shell expansion
before each directory is processed.
ids : list of str, optional
Ids to keep the transport maps for.
If not None, any id not in this list will be filtered out of the maps.
The order of ids in the resulting transport maps is also guaranteed
to be the same as this parameter.
time : int or float, optional
If ids is not None, specifies the time at which the ids were measured.
Returns
-------
transport_maps : list of { 't1': float, 't2': float, 'transport_map': anndata.AnnData }
The list of all transport maps
Raises
------
ValueError
If exactly one of (ids, time) is None. Must be both or none.
If no transport map is found in the given directory.
If several transport maps are found for the same timepoints.
Notes
-----
Time points are determined by the filename.
Filenames must end in `_{t1}_{t2}.extension`.
Any transport map not following this convention will be ignored.
If any other dataset file is present in the listed directories and
uses this naming convention, it might be interpreted as a transport
map, yielding unpredictable results.
All wot commands are guaranteed to enforce this naming convention.
"""
transport_maps_inputs = [] # file, start, end
is_pattern = not os.path.isdir(input_dir)
files = os.listdir(input_dir) if not is_pattern else glob.glob(input_dir)
if (ids is None) != (time is None):
raise ValueError("Only one of time and ids is None. Must be both or none")
tmap_times = set()
for path in files:
path = os.path.join(os.path.dirname(input_dir), path) if not is_pattern else path
if os.path.isfile(path):
file_info = wot.io.get_filename_and_extension(os.path.basename(path))
basename = file_info[0]
tokens = basename.split('_')
t1 = tokens[len(tokens) - 2]
t2 = tokens[len(tokens) - 1]
try:
t1 = float(t1)
t2 = float(t2)
except ValueError:
continue
ds = wot.io.read_dataset(path)
if ids is not None and t1 == time:
# subset rows
indices = ds.obs.index.isin(ids)
ds = anndata.AnnData(ds.X[indices], ds.obs.iloc[indices], ds.var)
if ids is not None and t2 == time:
# subset columns
indices = ds.var.index.isin(ids)
ds = anndata.AnnData(ds.X[:, indices], ds.obs, ds.var.iloc[indices])
if (t1, t2) in tmap_times:
raise ValueError("Multiple transport maps found for times ({},{})".format(t1, t2))
else:
tmap_times.add((t1, t2))
transport_maps_inputs.append(
{'transport_map': ds, 't1': t1, 't2': t2})
if not transport_maps_inputs:
raise ValueError("No transport maps found in the given directories")
transport_maps_inputs.sort(key=lambda x: x['t1']) # sort by t1 (start time)
return transport_maps_inputs | 2,436 |
def _check_parents(item):
"""Check parents where all children are checked.
"""
parent = item.parent()
if parent:
checked = parent.object.data().checked.all()
# checked = _all_children_checked(parent) # TEST!
_set_check_state(parent, checked)
_check_parents(parent) | 2,437 |
def _split_keys(keypath, separator):
"""
Splits keys using the given separator:
eg. 'item.subitem[1]' -> ['item', 'subitem[1]'].
"""
if separator:
return keypath.split(separator)
return [keypath] | 2,438 |
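# Usage sketch: split a keypath on the configured separator, or keep it whole
# when no separator is given.
print(_split_keys('item.subitem[1]', '.'))   # ['item', 'subitem[1]']
print(_split_keys('item.subitem[1]', None))  # ['item.subitem[1]']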
def get_code_base_url() -> str | None:
"""Get current code base url."""
code_base = None
with suppress(subprocess.CalledProcessError):
code_base = subprocess.check_output("git config --get remote.origin.url".split()).decode("utf-8").strip()
return code_base | 2,439 |
def _get_referenced_type_equivalences(graphql_types, type_equivalence_hints):
"""Filter union types with no edges from the type equivalence hints dict."""
referenced_types = set()
for graphql_type in graphql_types.values():
if isinstance(graphql_type, (GraphQLObjectType, GraphQLInterfaceType)):
for _, field in graphql_type.fields.items():
if isinstance(field.type, GraphQLList):
referenced_types.add(field.type.of_type.name)
return {
original: union
for original, union in type_equivalence_hints.items()
if union.name in referenced_types
} | 2,440 |
def E(poly, dist=None, **kws):
"""
Expected value operator.
1st order statistics of a probability distribution or polynomial on a given
probability space.
Args:
poly (Poly, Dist) : Input to take expected value on.
dist (Dist) : Defines the space the expected value is taken on.
It is ignored if `poly` is a distribution.
**kws (optional) : Extra keywords passed to dist.mom.
Returns:
(ndarray) : The expected value of the polynomial or distribution, where
`expected.shape==poly.shape`.
Examples:
>>> x = chaospy.variable()
>>> Z = chaospy.Uniform()
>>> print(chaospy.E(Z))
0.5
>>> print(chaospy.E(x**3, Z))
0.25
"""
if not isinstance(poly, (distributions.Dist, polynomials.Poly)):
print(type(poly))
print("Approximating expected value...")
out = quadrature.quad(poly, dist, veceval=True, **kws)
print("done")
return out
if isinstance(poly, distributions.Dist):
dist, poly = poly, polynomials.variable(len(poly))
if not poly.keys:
return numpy.zeros(poly.shape, dtype=int)
if isinstance(poly, (list, tuple, numpy.ndarray)):
return [E(_, dist, **kws) for _ in poly]
if poly.dim < len(dist):
poly = polynomials.setdim(poly, len(dist))
shape = poly.shape
poly = polynomials.flatten(poly)
keys = poly.keys
mom = dist.mom(numpy.array(keys).T, **kws)
A = poly.A
if len(dist) == 1:
mom = mom[0]
out = numpy.zeros(poly.shape)
for i in range(len(keys)):
out += A[keys[i]]*mom[i]
out = numpy.reshape(out, shape)
return out | 2,441 |
def spacify(string, spaces=2):
"""Add spaces to the beginning of each line in a multi-line string."""
return spaces * " " + (spaces * " ").join(string.splitlines(True)) | 2,442 |
def create_stage(AccessLogSettings=None, ApiId=None, AutoDeploy=None, ClientCertificateId=None, DefaultRouteSettings=None, DeploymentId=None, Description=None, RouteSettings=None, StageName=None, StageVariables=None, Tags=None):
"""
Creates a Stage for an API.
See also: AWS API Documentation
Exceptions
:example: response = client.create_stage(
AccessLogSettings={
'DestinationArn': 'string',
'Format': 'string'
},
ApiId='string',
AutoDeploy=True|False,
ClientCertificateId='string',
DefaultRouteSettings={
'DataTraceEnabled': True|False,
'DetailedMetricsEnabled': True|False,
'LoggingLevel': 'ERROR'|'INFO'|'OFF',
'ThrottlingBurstLimit': 123,
'ThrottlingRateLimit': 123.0
},
DeploymentId='string',
Description='string',
RouteSettings={
'string': {
'DataTraceEnabled': True|False,
'DetailedMetricsEnabled': True|False,
'LoggingLevel': 'ERROR'|'INFO'|'OFF',
'ThrottlingBurstLimit': 123,
'ThrottlingRateLimit': 123.0
}
},
StageName='string',
StageVariables={
'string': 'string'
},
Tags={
'string': 'string'
}
)
:type AccessLogSettings: dict
:param AccessLogSettings: Settings for logging access in this stage.\n\nDestinationArn (string) --The ARN of the CloudWatch Logs log group to receive access logs.\n\nFormat (string) --A single line format of the access logs of data, as specified by selected $context variables. The format must include at least $context.requestId.\n\n\n
:type ApiId: string
:param ApiId: [REQUIRED]\nThe API identifier.\n
:type AutoDeploy: boolean
:param AutoDeploy: Specifies whether updates to an API automatically trigger a new deployment. The default value is false.
:type ClientCertificateId: string
:param ClientCertificateId: The identifier of a client certificate for a Stage. Supported only for WebSocket APIs.
:type DefaultRouteSettings: dict
:param DefaultRouteSettings: The default route settings for the stage.\n\nDataTraceEnabled (boolean) --Specifies whether (true) or not (false) data trace logging is enabled for this route. This property affects the log entries pushed to Amazon CloudWatch Logs. Supported only for WebSocket APIs.\n\nDetailedMetricsEnabled (boolean) --Specifies whether detailed metrics are enabled.\n\nLoggingLevel (string) --Specifies the logging level for this route: INFO, ERROR, or OFF. This property affects the log entries pushed to Amazon CloudWatch Logs. Supported only for WebSocket APIs.\n\nThrottlingBurstLimit (integer) --Specifies the throttling burst limit.\n\nThrottlingRateLimit (float) --Specifies the throttling rate limit.\n\n\n
:type DeploymentId: string
:param DeploymentId: The deployment identifier of the API stage.
:type Description: string
:param Description: The description for the API stage.
:type RouteSettings: dict
:param RouteSettings: Route settings for the stage, by routeKey.\n\n(string) --\n(dict) --Represents a collection of route settings.\n\nDataTraceEnabled (boolean) --Specifies whether (true) or not (false) data trace logging is enabled for this route. This property affects the log entries pushed to Amazon CloudWatch Logs. Supported only for WebSocket APIs.\n\nDetailedMetricsEnabled (boolean) --Specifies whether detailed metrics are enabled.\n\nLoggingLevel (string) --Specifies the logging level for this route: INFO, ERROR, or OFF. This property affects the log entries pushed to Amazon CloudWatch Logs. Supported only for WebSocket APIs.\n\nThrottlingBurstLimit (integer) --Specifies the throttling burst limit.\n\nThrottlingRateLimit (float) --Specifies the throttling rate limit.\n\n\n\n\n\n\n
:type StageName: string
:param StageName: [REQUIRED]\nThe name of the stage.\n
:type StageVariables: dict
:param StageVariables: A map that defines the stage variables for a Stage. Variable names can have alphanumeric and underscore characters, and the values must match [A-Za-z0-9-._~:/?#&=,]+.\n\n(string) --\n(string) --A string with a length between [0-2048].\n\n\n\n\n
:type Tags: dict
:param Tags: The collection of tags. Each tag element is associated with a given resource.\n\n(string) --\n(string) --A string with a length between [0-1600].\n\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'AccessLogSettings': {
'DestinationArn': 'string',
'Format': 'string'
},
'ApiGatewayManaged': True|False,
'AutoDeploy': True|False,
'ClientCertificateId': 'string',
'CreatedDate': datetime(2015, 1, 1),
'DefaultRouteSettings': {
'DataTraceEnabled': True|False,
'DetailedMetricsEnabled': True|False,
'LoggingLevel': 'ERROR'|'INFO'|'OFF',
'ThrottlingBurstLimit': 123,
'ThrottlingRateLimit': 123.0
},
'DeploymentId': 'string',
'Description': 'string',
'LastDeploymentStatusMessage': 'string',
'LastUpdatedDate': datetime(2015, 1, 1),
'RouteSettings': {
'string': {
'DataTraceEnabled': True|False,
'DetailedMetricsEnabled': True|False,
'LoggingLevel': 'ERROR'|'INFO'|'OFF',
'ThrottlingBurstLimit': 123,
'ThrottlingRateLimit': 123.0
}
},
'StageName': 'string',
'StageVariables': {
'string': 'string'
},
'Tags': {
'string': 'string'
}
}
Response Structure
(dict) --
The request has succeeded and has resulted in the creation of a resource.
AccessLogSettings (dict) --
Settings for logging access in this stage.
DestinationArn (string) --
The ARN of the CloudWatch Logs log group to receive access logs.
Format (string) --
A single line format of the access logs of data, as specified by selected $context variables. The format must include at least $context.requestId.
ApiGatewayManaged (boolean) --
Specifies whether a stage is managed by API Gateway. If you created an API using quick create, the $default stage is managed by API Gateway. You can\'t modify the $default stage.
AutoDeploy (boolean) --
Specifies whether updates to an API automatically trigger a new deployment. The default value is false.
ClientCertificateId (string) --
The identifier of a client certificate for a Stage. Supported only for WebSocket APIs.
CreatedDate (datetime) --
The timestamp when the stage was created.
DefaultRouteSettings (dict) --
Default route settings for the stage.
DataTraceEnabled (boolean) --
Specifies whether (true) or not (false) data trace logging is enabled for this route. This property affects the log entries pushed to Amazon CloudWatch Logs. Supported only for WebSocket APIs.
DetailedMetricsEnabled (boolean) --
Specifies whether detailed metrics are enabled.
LoggingLevel (string) --
Specifies the logging level for this route: INFO, ERROR, or OFF. This property affects the log entries pushed to Amazon CloudWatch Logs. Supported only for WebSocket APIs.
ThrottlingBurstLimit (integer) --
Specifies the throttling burst limit.
ThrottlingRateLimit (float) --
Specifies the throttling rate limit.
DeploymentId (string) --
The identifier of the Deployment that the Stage is associated with. Can\'t be updated if autoDeploy is enabled.
Description (string) --
The description of the stage.
LastDeploymentStatusMessage (string) --
Describes the status of the last deployment of a stage. Supported only for stages with autoDeploy enabled.
LastUpdatedDate (datetime) --
The timestamp when the stage was last updated.
RouteSettings (dict) --
Route settings for the stage, by routeKey.
(string) --
(dict) --
Represents a collection of route settings.
DataTraceEnabled (boolean) --
Specifies whether (true) or not (false) data trace logging is enabled for this route. This property affects the log entries pushed to Amazon CloudWatch Logs. Supported only for WebSocket APIs.
DetailedMetricsEnabled (boolean) --
Specifies whether detailed metrics are enabled.
LoggingLevel (string) --
Specifies the logging level for this route: INFO, ERROR, or OFF. This property affects the log entries pushed to Amazon CloudWatch Logs. Supported only for WebSocket APIs.
ThrottlingBurstLimit (integer) --
Specifies the throttling burst limit.
ThrottlingRateLimit (float) --
Specifies the throttling rate limit.
StageName (string) --
The name of the stage.
StageVariables (dict) --
A map that defines the stage variables for a stage resource. Variable names can have alphanumeric and underscore characters, and the values must match [A-Za-z0-9-._~:/?#&=,]+.
(string) --
(string) --
A string with a length between [0-2048].
Tags (dict) --
The collection of tags. Each tag element is associated with a given resource.
(string) --
(string) --
A string with a length between [0-1600].
Exceptions
ApiGatewayV2.Client.exceptions.NotFoundException
ApiGatewayV2.Client.exceptions.TooManyRequestsException
ApiGatewayV2.Client.exceptions.BadRequestException
ApiGatewayV2.Client.exceptions.ConflictException
:return: {
'AccessLogSettings': {
'DestinationArn': 'string',
'Format': 'string'
},
'ApiGatewayManaged': True|False,
'AutoDeploy': True|False,
'ClientCertificateId': 'string',
'CreatedDate': datetime(2015, 1, 1),
'DefaultRouteSettings': {
'DataTraceEnabled': True|False,
'DetailedMetricsEnabled': True|False,
'LoggingLevel': 'ERROR'|'INFO'|'OFF',
'ThrottlingBurstLimit': 123,
'ThrottlingRateLimit': 123.0
},
'DeploymentId': 'string',
'Description': 'string',
'LastDeploymentStatusMessage': 'string',
'LastUpdatedDate': datetime(2015, 1, 1),
'RouteSettings': {
'string': {
'DataTraceEnabled': True|False,
'DetailedMetricsEnabled': True|False,
'LoggingLevel': 'ERROR'|'INFO'|'OFF',
'ThrottlingBurstLimit': 123,
'ThrottlingRateLimit': 123.0
}
},
'StageName': 'string',
'StageVariables': {
'string': 'string'
},
'Tags': {
'string': 'string'
}
}
:returns:
ApiGatewayV2.Client.exceptions.NotFoundException
ApiGatewayV2.Client.exceptions.TooManyRequestsException
ApiGatewayV2.Client.exceptions.BadRequestException
ApiGatewayV2.Client.exceptions.ConflictException
"""
pass | 2,443 |
def simple_line_plot(x, y = None,
title = "",
xlabel = "",
ylabel = "",
context = 'notebook',
xlim = None,
ylim = None,
color = 'blue',
parse_axes = None,
return_axes = False,
label = ""):
"""
plot a simple line of y against x (or of x alone if y is None), with optional title, labels, axis limits and color
"""
#sns.set()
plt.style.use(['science','ieee'])
sns.set_context(context)
if parse_axes is None:
fig, ax1 = plt.subplots()
else:
ax1 = parse_axes
if y is not None:
ax1.plot(x, y, color = color, label = label)
else:
ax1.plot(x, color = color, label = label)
if xlim is not None:
ax1.set_xlim(xlim)
if ylim is not None:
ax1.set_ylim(ylim)
ax1.set_title(title)
ax1.set_xlabel(xlabel)
ax1.set_ylabel(ylabel)
if return_axes:
return ax1
else:
plt.show() | 2,444 |
def generate(parsed_data, template, opath, dme_vault, helper, **kwargs):
"""Generates collection and data-object metadata needed for DME upload.
For each collection (directory) and data-object (file), an output file is
generated in JSON format. 'opath' dictates where these files will be saved.
Returns a dictionary of collection information where [key] = DME PATH of
the collection to be initialized or updated and [value] = absolute PATH of the
collection metadata json file.
"""
template = json2dict(template)
collections = helper(parsed_data, template, opath, dme_vault, **kwargs)
return collections | 2,445 |
def pytest_addoption(parser):
"""Add option to run slow tests."""
parser.addoption("--run-slow", action="store_true",
default=False, help="Run slow tests") | 2,446 |
def _get_snmp(oid, hostname, community):
"""SNMP Wrapper function. Returns tuple of oid, value
Keyword Arguments:
oid --
community --
"""
from pysnmp.entity.rfc3413.oneliner import cmdgen
cmd_gen = cmdgen.CommandGenerator()
error_indication, error_status, error_index, var_bind = cmd_gen.getCmd(
cmdgen.CommunityData(community),
cmdgen.UdpTransportTarget((hostname, 161)),
oid)
if error_indication:
print(error_indication)
else:
if error_status:
print ('%s at %s' % (
error_status.prettyPrint(),
error_index and var_bind[int(error_index)-1] or '?')
)
else:
for name, value in var_bind:
return (name.prettyPrint(), value.prettyPrint()) | 2,447 |
def com_google_fonts_check_048(ttFont):
"""Font has **proper** whitespace glyph names?"""
from fontbakery.utils import get_glyph_name
def getGlyphEncodings(font, names):
result = set()
for subtable in font['cmap'].tables:
if subtable.isUnicode():
for codepoint, name in subtable.cmap.items():
if name in names:
result.add(codepoint)
return result
if ttFont['post'].formatType == 3.0:
yield SKIP, "Font has version 3 post table."
else:
failed = False
space_enc = getGlyphEncodings(ttFont, ["uni0020", "space"])
nbsp_enc = getGlyphEncodings(
ttFont, ["uni00A0", "nonbreakingspace", "nbspace", "nbsp"])
space = get_glyph_name(ttFont, 0x0020)
if 0x0020 not in space_enc:
failed = True
yield FAIL, Message("bad20", ("Glyph 0x0020 is called \"{}\":"
" Change to \"space\""
" or \"uni0020\"").format(space))
nbsp = get_glyph_name(ttFont, 0x00A0)
if 0x00A0 not in nbsp_enc:
if 0x00A0 in space_enc:
# This is OK.
# Some fonts use the same glyph for both space and nbsp.
pass
else:
failed = True
yield FAIL, Message("badA0", ("Glyph 0x00A0 is called \"{}\":"
" Change to \"nbsp\""
" or \"uni00A0\"").format(nbsp))
if failed is False:
yield PASS, "Font has **proper** whitespace glyph names." | 2,448 |
def ensure_format_is_valid( r, dataset_name ):
"""
This extracts the format from the given resource
and maps it according to the formats mapping, if provided."""
if 'format' not in r:
log.error( '%s resources-object is missing format-property. Cannot save this value', dataset_name )
# TODO create error message and exit
return None
format_ = r['format'].strip().lower()
format_ = re.sub( r'[^a-zA-Z0-9]', '_', format_ ) # replace special character in format-attribute with _
format_ = re.sub( r'^_+', '', format_ ) # replace leading _
format_ = re.sub( r'_+$', '', format_ ) # replace trailing _
format_ = re.sub( r'__*', '_', format_ ) # replace double __
if not format_:
log.error( 'Format is not valid after cleanup, original: %s. Will continue with next resource', r['format'] )
return None
format_ = ensure_format_in_dictionary( format_ )
log.info( 'Found valid format "%s"', format_ )
return format_ | 2,449 |
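# Standalone sketch of the cleanup this function applies to a raw format string;
# the log and ensure_format_in_dictionary dependencies are left out here.
import re

raw = '  CSV (zipped) '
format_ = raw.strip().lower()
format_ = re.sub(r'[^a-zA-Z0-9]', '_', format_)  # 'csv__zipped_'
format_ = re.sub(r'^_+', '', format_)            # strip leading _
format_ = re.sub(r'_+$', '', format_)            # strip trailing _
format_ = re.sub(r'__*', '_', format_)           # collapse double __
print(format_)  # csv_zipped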
def find_nearest(array, value):
"""
Inputs:
array - array of values to search
value - value to search for in array
Outputs:
array[idx] - nearest value in array
"""
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return array[idx] | 2,450 |
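# Usage sketch: return the element of the array closest to the requested value.
import numpy as np
grid = np.array([0.0, 0.5, 1.0, 1.5, 2.0])
print(find_nearest(grid, 1.3))  # 1.5
print(find_nearest(grid, 0.2))  # 0.0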
def sync_contacts(contacts, create_missing=True, quiet=True):
"""
contacts is a list of dictionaries like this:
[{
u'E-Mail': u'[email protected]',
u'Gender': 2,
u'First Name': u'Admin',
u'Last Name': u'von Total Berlin',
...
}, ...]
The dictionary keys are mapped to emarsys field ids using
settings.EMARSYS_FIELDS, which can be generated with `get_fields()`.
Fields in settings.EMARSYS_CREATE_ONLY_FIELDS are not sent when updating a
contact.
"""
def log_debug(message):
if not quiet:
print("{}\n".format(message))
def chunked(it, n):
"""
From http://stackoverflow.com/a/8991553
"""
it = iter(it)
while True:
chunk = tuple(islice(it, n))  # itertools.islice (requires: from itertools import islice); the original slice() call here was a bug
if not chunk:
return
yield chunk
total_updated = 0
total_created = 0
# emails of contacts that couldn't be updated because they don't exist at
# emarsys
missing_contacts = []
# emails of contacts that couldn't be updated or created due to an error at
# emarsys
failed_contacts = []
contacts = map(_transform_contact_data, contacts)
# Filter contact data using whitelist
if settings.EMARSYS_RECIPIENT_WHITELIST is not None:
contacts = filter(lambda contact: contact[3] # 3=email
in settings.EMARSYS_RECIPIENT_WHITELIST, contacts)
update_contacts, create_contacts = tee(contacts, 2)
# Filter out fields in create_only_fields for updating
create_only_field_ids = [settings.EMARSYS_FIELDS[field_name][0]
for field_name in
settings.EMARSYS_CREATE_ONLY_FIELDS]
update_contacts = [{k: v for k, v in contact.items()
if k not in create_only_field_ids}
for contact in update_contacts]
# Update contacts
for chunk_of_contacts in chunked(update_contacts, BATCH_SIZE):
log_debug("Updating a chunk of {} users."
.format(len(chunk_of_contacts)))
num_successful, errors = _update_contacts(chunk_of_contacts)
log_debug('{} users updated, {} users errored.'
.format(num_successful, len(errors)))
total_updated += num_successful
missing_contacts.extend(email
for email, error_dict in errors.items()
if '2008' in error_dict)
failed_contacts.extend((email, error_dict)
for email, error_dict in errors.items()
if '2008' not in error_dict)
if create_missing:
# Find contacts to create in original contact list
create_contacts = filter(lambda contact: contact[3] in
missing_contacts, create_contacts)
# Create contacts
for chunk_of_contacts in chunked(create_contacts, BATCH_SIZE):
log_debug("Creating a chunk of {} users."
.format(len(chunk_of_contacts)))
num_successful, errors = _create_contacts(chunk_of_contacts)
log_debug('{} users created, {} users errored.'
.format(num_successful, len(errors)))
total_created += num_successful
failed_contacts.extend((email, error_dict)
for email, error_dict in errors.items())
# All contacts were either updated or the update or create failed.
missing_contacts = []
return total_updated, total_created, missing_contacts, failed_contacts | 2,451 |
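# Standalone sketch of the batching helper used inside sync_contacts, with the
# itertools.islice call that the referenced Stack Overflow recipe relies on.
from itertools import islice

def chunked(it, n):
    it = iter(it)
    while True:
        chunk = tuple(islice(it, n))
        if not chunk:
            return
        yield chunk

print(list(chunked(range(7), 3)))  # [(0, 1, 2), (3, 4, 5), (6,)]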
def vader_entity_sentiment(df,
textacy_col,
entity,
inplace=True,
vader_sent_types=['neg', 'neu', 'pos', 'compound'],
keep_stats=['count', 'mean', 'min', '25%', '50%', '75%', 'max']):
"""
Pull the descriptive sentiment stats of the text sentences that mention a specified entity.
Parameters
----------
df : DataFrame
Dataframe which holds the text
textacy_col : str
The name to give to the column with the textacy doc objects
entity : str
The entity to search the textacy Doc object for
inplace : bool
Whether to return the entire df with the sentiment info or the sentiment info alone
Default is True
vader_sent_types : list
The type of sentiment to extract. neg: negative, pos: positive, neu: neutral, compound is a
combination of all three
keep_stats : list
A list of the summary statistics to keep. Default is all returned by pandas DataFrame.describe() method
Returns
-------
DataFrame
Either the dataframe passed as arg with the sentiment info as trailing columns
or the sentiment descriptive stats by itself
"""
vader_analyzer = SentimentIntensityAnalyzer()
sentiment_rows = []
for text in df[textacy_col].values:
text_entities = list(entity_statements(text, entity))
# Iterate through all sentences and get sentiment analysis
entity_sentiment_info = [vader_analyzer.polarity_scores(sentence)
for
sentence
in
text_entities]
# After taking sentiments, turn into a dataframe and describe
try:
# Indices and columns to keep
keep_stats = keep_stats
keep_cols = vader_sent_types
# Describe those columns
summary_stats = pd.DataFrame(entity_sentiment_info).describe().loc[keep_stats, keep_cols]
# Add row to list
sentiment_rows.append(pivot_df_to_row(summary_stats))
# If there's nothing to describe
except ValueError as e:
# Create a summary stats with nulls
summary_stats = pd.DataFrame(index=keep_stats, columns=keep_cols)
# Add to list of rows
sentiment_rows.append(pivot_df_to_row(summary_stats))
# Concatenate All rows together into one dataframe
sentiment_df = pd.concat(sentiment_rows).add_prefix(entity+'_')
if not inplace:
return sentiment_df.reset_index(drop=True)
else:
# Return original df with new sentiment attached
return pd.concat([df, sentiment_df], axis=1) | 2,452 |
def install(eventloop=None):
"""
Install a tulip-based reactor.
"""
if eventloop is None:
eventloop = get_event_loop()
reactor = AsyncioSelectorReactor(eventloop)
from twisted.internet.main import installReactor
installReactor(reactor) | 2,453 |
def post_list(request):
"""
Create a view that will return a list of
Posts that were published prior to 'now' and
render them to the 'blogposts.html' template
:param request:
:return:
"""
posts = Post.objects.filter(published_date__lte=timezone.now()
).order_by('-published_date')
return render(request, "blogposts.html", {'posts': posts}) | 2,454 |
def get_model():
"""
Returns a compiled convolutional neural network model. Assume that the
`input_shape` of the first layer is `(IMG_WIDTH, IMG_HEIGHT, 3)`.
The output layer should have `NUM_CATEGORIES` units, one for each category.
"""
# initialize a convolutional model
model = tf.keras.models.Sequential([
# add 3 convolutional layers and 3 average pooling layers to extract features from the images
tf.keras.layers.Conv2D(32, (3, 3), activation="relu", input_shape=(IMG_WIDTH, IMG_HEIGHT, 3)),
tf.keras.layers.AveragePooling2D(pool_size=(2, 2)),
tf.keras.layers.Conv2D(64, (3, 3), activation="relu", input_shape=(IMG_WIDTH, IMG_HEIGHT, 3)),
tf.keras.layers.AveragePooling2D(pool_size=(2, 2)),
tf.keras.layers.Conv2D(128, (3, 3), activation="relu", input_shape=(IMG_WIDTH, IMG_HEIGHT, 3)),
tf.keras.layers.AveragePooling2D(pool_size=(2, 2)),
# flatten the units
tf.keras.layers.Flatten(),
# add a hidden layer
tf.keras.layers.Dense(128, activation="relu"),
#add dropout
tf.keras.layers.Dropout(0.5),
# add the output layer with NUM_CATEGORIES number of output nodes
tf.keras.layers.Dense(NUM_CATEGORIES, activation="softmax")
])
# compile the model
model.compile(
optimizer="adam",
loss="categorical_crossentropy",
metrics=["accuracy"]
)
return model | 2,455 |
def get_app_icon_path():
"""Path to OpenPype icon."""
return resources.get_openpype_icon_filepath() | 2,456 |
def post_create_ipsec_endpoint_tunnel(
api_client,
endpoint_id,
remote_subnet=None,
local_subnet=None,
enabled=None,
ping_ipaddress=None,
ping_interface=None,
ping_interval=None,
description=None,
**kwargs
): # noqa: E501
"""post_create_ipsec_endpoint_tunnel # noqa: E501
Create IPsec endpoint tunnel # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> response = await api.post_create_ipsec_endpoint_tunnel(endpoint_id, remote_subnet=remote_subnet, async_req=True)
:param int endpoint_id: ID for IPsec endpoint (required)
:param remote_subnet str:
:param local_subnet str:
:param enabled str:
:param ping_ipaddress str:
:param ping_interface str:
:param ping_interval int:
:param description str:
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: APIResponse or awaitable if async
"""
local_var_params = locals()
request_params = [
"remote_subnet",
"local_subnet",
"enabled",
"ping_ipaddress",
"ping_interface",
"ping_interval",
"description",
]
collection_formats = {}
path_params = {"endpoint_id": endpoint_id}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = {}
for param in [p for p in request_params if local_var_params.get(p) is not None]:
body_params[param] = local_var_params[param]
# HTTP header `Accept`
header_params["Accept"] = api_client.select_header_accept(
["application/json"]
) # noqa: E501
# HTTP header `Content-Type`
header_params["Content-Type"] = api_client.select_header_content_type( # noqa: E501
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiTokenAuth", "basicAuth"] # noqa: E501
return api_client.call_api(
"/ipsec/endpoints/{endpoint_id}/tunnels",
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="object", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
) | 2,457 |
def cow_service(
wfo: List[str] = Query(
[], min_length=3, max_length=4, title="WFO Identifiers"
),
begints: datetime = Query(...),
endts: datetime = Query(...),
phenomena: List[str] = Query(None, max_length=2),
lsrtype: List[str] = Query(None, max_length=2),
hailsize: float = Query(1),
lsrbuffer: float = Query(15),
warningbuffer: float = Query(1),
wind: float = Query(58),
windhailtag: str = Query("N"),
limitwarns: str = Query("N"),
fcster: str = None,
):
"""Replaced by __doc__."""
return handler(
wfo,
begints,
endts,
phenomena,
lsrtype,
hailsize,
lsrbuffer,
warningbuffer,
wind,
windhailtag,
limitwarns,
fcster,
) | 2,458 |
def add_edges_reverse_indices(edge_indices, edge_values=None, remove_duplicates=True, sort_indices=True):
"""Add the edges for (i,j) as (j,i) with the same edge values. If they do already exist, no edge is added.
By default, all indices are sorted.
Args:
edge_indices (np.array): Index list of shape (N,2).
edge_values (np.array): Edge values of shape (N,M) matching the edge_indices
remove_duplicates (bool): Remove duplicate edge indices. Default is True.
sort_indices (bool): Sort final edge indices. Default is True.
Returns:
np.array: edge_indices or [edge_indices, edge_values]
"""
clean_edge = None
edge_index_flip = np.concatenate([edge_indices[:,1:2] ,edge_indices[:,0:1]],axis=-1)
edge_index_flip_ij = edge_index_flip[edge_index_flip[:,1] != edge_index_flip[:,0]] # Do not flip self loops
clean_index = np.concatenate([edge_indices,edge_index_flip_ij],axis=0)
if edge_values is not None:
edge_to_add = edge_values[edge_index_flip[:,1] != edge_index_flip[:,0]]
clean_edge = np.concatenate([edge_values,edge_to_add],axis=0)
if remove_duplicates:
un, unis = np.unique(clean_index, return_index=True, axis=0)
        mask_all = np.zeros(clean_index.shape[0], dtype=bool)  # plain bool: np.bool is removed in recent numpy
mask_all[unis] = True
mask_all[:edge_indices.shape[0]] = True # keep old indices untouched
clean_index = clean_index[mask_all]
if edge_values is not None:
# clean_edge = clean_edge[unis]
clean_edge = clean_edge[mask_all]
if sort_indices:
order1 = np.argsort(clean_index[:, 1], axis=0, kind='mergesort') # stable!
ind1 = clean_index[order1]
if edge_values is not None:
clean_edge = clean_edge[order1]
order2 = np.argsort(ind1[:, 0], axis=0, kind='mergesort')
clean_index = ind1[order2]
if edge_values is not None:
clean_edge = clean_edge[order2]
if edge_values is not None:
return clean_index, clean_edge
else:
return clean_index | 2,459 |
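# A minimal usage sketch for add_edges_reverse_indices (assumptions: numpy is imported
# as np and the function above is in scope). It symmetrizes a tiny directed edge list.
example_edges = np.array([[0, 1], [1, 2], [2, 2]])    # includes one self loop
example_values = np.array([[0.1], [0.2], [0.3]])
sym_idx, sym_val = add_edges_reverse_indices(example_edges, example_values)
print(sym_idx)  # now also contains (1, 0) and (2, 1); the self loop is not duplicated
print(sym_val)  # reversed edges carry the same values as their forward counterparts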
def isint(var:Any, raise_error:bool=False)-> bool:
"""Check if var is an integer
Args:
        var (Any): variable to check
raise_error (bool, optional): TypeError raised if set to `True`. Defaults to `False`.
Raises:
TypeError: raised if var is not an integer
Returns:
bool: `True` if var is an integer
"""
    is_ = isinstance(var, int)
if not is_ and bool(raise_error):
raise TypeError(f'Integer expected: {var=} is not an int')
return is_ | 2,460 |
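# Quick illustration of isint (assumes the function above is in scope):
print(isint(3))      # -> True
print(isint("3"))    # -> False
# isint("3", raise_error=True) raises TypeError instead of returning False.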
def convert_interpolate2d(g, op, x):
"""Operator converter for interpolate 2D(dims == 4)."""
def get_interpolate_mode(op):
"""conver 'interp_method' attr of paddle to tvm"""
interp_method = op.attr("interp_method")
align_corners = op.attr("align_corners")
align_mode = op.attr("align_mode")
rounding_method = ""
if interp_method == "nearest":
interp_method = "nearest_neighbor"
coordinate_transformation_mode = "asymmetric"
rounding_method = "floor"
elif interp_method == "bilinear":
interp_method = "linear"
if not align_corners and align_mode == 0:
coordinate_transformation_mode = "half_pixel"
else:
if align_corners:
coordinate_transformation_mode = "align_corners"
else:
coordinate_transformation_mode = "asymmetric"
elif interp_method == "bicubic":
interp_method = "cubic"
if align_corners:
coordinate_transformation_mode = "align_corners"
else:
coordinate_transformation_mode = "half_pixel"
else:
msg = "interp_method {} is not supported for PaddlePaddle's interpolate"
raise tvm.error.OpAttributeInvalid(msg.format(interp_method))
return rounding_method, interp_method, coordinate_transformation_mode
layout = op.attr("data_layout")
out_h = op.attr("out_h")
out_w = op.attr("out_w")
out_size = [out_h, out_w]
input_out_size = op.input("OutSize")
input_size_tensor = op.input("SizeTensor")
input_scale = op.input("Scale")
if input_size_tensor:
out_size = g.get_node(input_size_tensor[0])
out_size = _infer_value(out_size, g.get_params())
elif input_out_size:
out_size = g.get_node(input_out_size[0])
out_size = _infer_value(out_size, g.get_params())
else:
input_shape = infer_shape(x)
if layout == "NCHW":
in_h, in_w = input_shape[2], input_shape[3]
else:
in_h, in_w = input_shape[1], input_shape[2]
if input_scale:
scale_data = g.get_node(input_scale[0])
scale_data = infer_value(scale_data, g.get_params()).numpy().tolist()
if len(scale_data) > 1:
out_h = int(scale_data[0] * in_h)
out_w = int(scale_data[1] * in_w)
else:
out_h = int(scale_data[0] * in_h)
out_w = int(scale_data[0] * in_w)
out_size = [out_h, out_w]
else:
scale = op.attr("scale")
scale = [float(i) for i in scale]
if len(scale) > 1:
out_h = int(scale[0] * in_h)
out_w = int(scale[1] * in_w)
out_size = [out_h, out_w]
rounding_method, interp_method, coordinate_transformation_mode = get_interpolate_mode(op)
out = _op.image.resize2d(
x,
size=out_size,
layout=layout,
method=interp_method,
coordinate_transformation_mode=coordinate_transformation_mode,
rounding_method=rounding_method,
cubic_alpha=-0.75,
)
g.add_node(op.output("Out")[0], out) | 2,461 |
def analysis_multi_frames(plot_config, args):
"""Analysis for multiple time frames
"""
tframes = range(args.tstart, args.tend + 1)
for tframe in tframes:
plot_config["tframe"] = tframe
plot_phase_diagram(plot_config, show_plot=False) | 2,462 |
def get_stats_on_spatial_predictions_4x5_2x25_by_lat(res='4x5', ex_str='',
target='Iodide',
use_annual_mean=False, filename=None,
folder=None, ds=None,
var2template='Chance2014_STTxx2_I',
debug=False):
"""
Evaluate the spatial predictions between models, binned by latitude
Parameters
-------
target (str): Name of the target variable (e.g. iodide)
res (str): horizontal resolution of dataset (e.g. 4x5)
debug (bool): print out debugging output?
var2template (str): variable to use a template for making new variables in ds
use_annual_mean (bool): use the annual mean of the variable
Returns
-------
(pd.DataFrame)
"""
if isinstance(ds, type(None)):
# If filename or folder not given, then use defaults
if isinstance(filename, type(None)):
filename = 'Oi_prj_predicted_{}_{}.nc'.format(target, res)
if isinstance(folder, type(None)):
data_root = utils.get_file_locations('data_root')
folder = '{}/{}/outputs/'.format(data_root, target)
ds = xr.open_dataset(folder + filename)
# Variables to consider
vars2analyse = list(ds.data_vars)
# Add LWI to array
ds = utils.add_LWI2array(ds=ds, var2template=var2template, res=res)
# - Get general annual stats
df = pd.DataFrame()
# take annual average
if use_annual_mean:
ds_tmp = ds.mean(dim='time')
else:
ds_tmp = ds
for var_ in vars2analyse:
# Mask to only consider (100%) water boxes
arr = ds_tmp[var_].values
if debug:
print(arr.shape, (ds_tmp['IS_WATER'] == False).shape)
arr[(ds_tmp['IS_WATER'] == False).values] = np.NaN
# Update values to include np.NaN
ds_tmp[var_].values = arr
# Setup series objects to hold stats
s_mean = pd.Series()
s_75 = pd.Series()
s_50 = pd.Series()
s_25 = pd.Series()
        # Loop by lat and save stats to dataframe
for lat_ in ds['lat'].values:
vals = ds_tmp[var_].sel(lat=lat_).values
stats_ = pd.Series(vals.flatten()).dropna().describe()
# At poles all values will be the same (masked) value
# if len( set(vals.flatten()) ) == 1:
# pass
# else:
# save quartiles and mean
# try:
s_mean[lat_] = stats_['mean']
s_25[lat_] = stats_['25%']
s_75[lat_] = stats_['75%']
s_50[lat_] = stats_['50%']
# except KeyError:
# print( 'Values not considered for lat={}'.format( lat_ ) )
# Save variables to DataFrame
var_str = '{} - {}'
stats_dict = {'mean': s_mean, '75%': s_75, '25%': s_25, 'median': s_50}
for stat_ in stats_dict.keys():
df[var_str.format(var_, stat_)] = stats_dict[stat_]
return df | 2,463 |
def calc_dif_mod_cn (x, y):
    """ Check if the difference between the modulus of consecutive numbers is a prime number """
    modx = math.sqrt(x.real ** 2 + x.imag ** 2) # modulus of the first complex number
    mody = math.sqrt(y.real ** 2 + y.imag ** 2) # modulus of the second complex number
    dif = modx-mody
    if dif != int(dif) or dif < 2: # a prime must be an integer greater than or equal to 2
        return 0
    for i in range(2, int(dif)//2 + 1): # look for a proper divisor of dif
        if dif % i == 0:
            return 0 # dif has a divisor, so it is not a prime number
    return dif | 2,464
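# A small check of calc_dif_mod_cn (assumes the function above and math are in scope):
print(calc_dif_mod_cn(3 + 4j, 2j))      # |3+4j| - |2j| = 5 - 2 = 3, prime -> 3.0
print(calc_dif_mod_cn(3 + 4j, 1 + 0j))  # 5 - 1 = 4, not prime -> 0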
def mitochondrialGTF(concatenated_gff, output_directory):
"""Convert GFF file with information for mitochondrial genes to GTF and return the path to GTF file.
Keyword arguments:
concatenated_gff -- path to concatenated GFF file
output_directory -- path to directory where GTF file should be placed
Important: transcript name must follow 'compXX_cXX_seqXX'
Notes: Make a separate GTF file with tagged mitochondrial genes (MT_) so that later
    it can be merged with the post-processed original GTF file (after removal of mitochondrial genes).
"""
output = output_directory + "/concatenated.gtf"
with open(concatenated_gff, 'r') as infile:
with open(output, 'w') as outfile:
for line in infile:
line=line.strip('\n')
elementList=line.split('\t')
p=re.search(r'((comp\d+_c\d+)_seq\d+)', elementList[0])
geneID=p.group(2)
transcriptID=p.group(1)
outfile.write(elementList[0]+'\tmitofinder\tgene\t'+elementList[3]+'\t'+elementList[4]+'\t'+elementList[5]+'\t'+elementList[6]+'\t'+elementList[7]+'\tgene_id "'+geneID+'"; transcript_id "'+transcriptID+'"; gene_name "'+'MT_'+elementList[0]+'_'+elementList[8]+'";'+'\n')
return output | 2,465 |
def benchmark_read_json():
"""
:return:
"""
start = time.time()
actual_start = time.time()
logger.info("in benchmark %s", start)
counter = 0
last_counter = 0
size = 0
last_size = 0
time_rate = 1
for pmid in pmids:
filename = get_path_from_pmid(pmid, 'xml')
# filename = base_path + 'pubmed_xml/' + pmid + '.xml'
counter = counter + 1
pubmed_data = dict()
try:
with open(filename, 'r') as f:
line = f.read()
size += len(line)
end = time.time()
if end - start > time_rate:
rate = counter - last_counter
size_rate = size - last_size
avg_file_size = size_rate / rate
last_counter = counter
last_size = size
start = end
logger.info("read %s seconds %s entries, %s size, %s size_rate, %s rate, %s avg_file_size", time_rate, counter, size, size_rate, rate, avg_file_size)
# pubmed_data = json.load(f)
# pubmed_data = json.load(line) # uncomment to also test json conversion
        except Exception as e:
            print(str(e))
end = time.time()
diff_time = end - actual_start
logger.info("read %s seconds %s entries, %s size", diff_time, counter, size) | 2,466 |
def backward(outputs, grad_outputs=None, **kwargs):
"""backward(outputs, grad_outputs=None, *, enable_double_backprop=False)
Runs backpropagation from variables simultaneously.
.. warning::
This feature is experimental. The interface can change in the future.
Args:
outputs (tuple or list of :class:`~chainer.Variable`):
A sequence of output variables from which backprop starts.
grad_outputs (None or tuple or list of :class:`~chainer.Variable`):
A sequence of variables that gives the initial value of each output
gradient.
If this argument is ``None``, backprop uses
:attr:`~chainer.Variable.grad_var` of ``outputs``.
enable_double_backprop (bool): If ``True``,
computational trace of the whole backpropagation procedure is
recorded to the computational graph so that one can further do
backpropagation from the resulting gradients. Note that
enabling it results in larger memory consumption needed to
store the gradients w.r.t intermediate variables that are
required for the second gradient computation.
.. seealso::
:meth:`chainer.Variable.backward`
:func:`chainer.grad`
"""
enable_double_backprop, = argument.parse_kwargs(
kwargs, ('enable_double_backprop', False),
retain_grad='semantics for retain_grad=True is under discussion',
loss_scale='chainer.backward does not support loss_scale option',
)
if not isinstance(outputs, (tuple, list)):
raise TypeError(
'outputs must be a tuple or a list, not {}.'.format(type(outputs)))
for v in outputs:
if not isinstance(v, chainer.Variable):
raise TypeError(
'each output must be a Variable, not {}'.format(type(v)))
if grad_outputs is not None:
if not isinstance(grad_outputs, (tuple, list)):
raise TypeError(
'grad_outputs must be None, a tuple, or a list, not {}.'
.format(type(grad_outputs)))
if len(outputs) != len(grad_outputs):
raise ValueError(
'grad_outputs must be of the same length as outputs.\n'
'len(outputs) = {}, len(grad_outputs) = {}'
.format(len(outputs), len(grad_outputs)))
is_chainerx = [v._has_chainerx_array for v in outputs]
if any(is_chainerx):
if not all(is_chainerx):
# The restriction is required as soon as the workarounds below
# are removed.
raise ValueError('cannot mix chainerx and other backends')
# Cannot use chainerx.backward directly, because it does not follow
# retain_grad=False
# TODO(kataoka): Fix chainerx.backward and remove this workaround
if grad_outputs is None:
grad_outputs = []
for y in outputs:
grad_outputs.append(y.grad_var)
y.grad_var = None
# The check is required because chainerx.backward sets default grads.
# TODO(kataoka): Fix chainerx.backward and remove this workaround
indices = [i for i, gy in enumerate(grad_outputs) if gy is not None]
outputs = [outputs[i] for i in indices]
grad_outputs = [grad_outputs[i] for i in indices]
# Use new variables to start backprop
# TODO(kataoka): Implement chainerx.backward(output, grad_outputs)
# and remove this workaround.
outputs = chainer.functions.identity(*outputs)
if not isinstance(outputs, tuple):
outputs = outputs,
grad_outputs = chainer.functions.identity(*grad_outputs)
if not isinstance(grad_outputs, tuple):
grad_outputs = grad_outputs,
# TODO(kataoka): Even after F.identity, non-float grad cannot be set.
# Move the check to elsewhere and remove this workaround.
outputs_ = []
for y, gy in zip(outputs, grad_outputs):
if not y.requires_grad and gy is not None:
warnings.warn(
'Some of grads are ignored by chainer.backward.\n'
'backend: ChainerX, '
'output.dtype: {}, grad_output.dtype: {}'.format(
y.dtype, gy.dtype),
RuntimeWarning)
continue
y.grad_var = gy
outputs_.append(y)
outputs = outputs_
del outputs_
# See also the ChainerX case of Variable.backward
arrs = []
for y in outputs:
arr = y._data[0]
assert isinstance(arr, chainerx.ndarray)
arrs.append(arr)
chainerx.backward(
arrs, enable_double_backprop=enable_double_backprop)
return
if grad_outputs is None:
grad_outputs = []
for y in outputs:
grad_var = y.grad_var
if grad_var is None:
warnings.warn(
'outputs contains a Variable without grad, or '
'duplicate outputs. Note that '
'chainer.backward does not set default grad.',
RuntimeWarning)
y.grad_var = None
grad_outputs.append(grad_var)
outputs = [
(y.node, gy) for y, gy in zip(outputs, grad_outputs) if gy is not None]
with chainer.using_config('enable_backprop', enable_double_backprop):
_backprop_to_all(outputs, False, None) | 2,467 |
def get_none_zero_region(im, margin):
"""
get the bounding box of the non-zero region of an ND volume
"""
input_shape = im.shape
    if isinstance(margin, int):
margin = [margin]*len(input_shape)
assert(len(input_shape) == len(margin))
indxes = np.nonzero(im)
idx_min = []
idx_max = []
for i in range(len(input_shape)):
idx_min.append(indxes[i].min())
idx_max.append(indxes[i].max())
for i in range(len(input_shape)):
idx_min[i] = max(idx_min[i] - margin[i], 0)
idx_max[i] = min(idx_max[i] + margin[i], input_shape[i] - 1)
return idx_min, idx_max | 2,468 |
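# A quick sanity check for get_none_zero_region (assumptions: numpy is imported as np
# and the function above is in scope). One non-zero voxel at (2, 3) with a margin of 1:
example = np.zeros((5, 5))
example[2, 3] = 1.0
print(get_none_zero_region(example, 1))  # bounding box indices ([1, 2], [3, 4])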
def create_short_link(request):
"""Given an URL, return a shortened link.
Args:
url: URL to be shortened.
Returns:
short_link: Shortened result of URL.
expires_at: Timestamp before which the link is valid.
"""
payload = json.loads(request.body)
url = payload.get("url")
try:
URLValidator(["http", "https"])(url)
except ValidationError:
return JsonResponse({"error": "Valid URL required."}, status=422)
expires_at = datetime.now(tz=timezone.utc) + timedelta(hours=1)
try:
short_link, _ = ShortLink.objects.update_or_create(
url=url, alias=get_alias(url), defaults={"expires_at": expires_at}
)
except IntegrityError:
        # Append a timestamp to the alias input to avoid collisions
short_link = ShortLink.objects.create(
url=url, alias=get_alias(f"{url}{datetime.utcnow()}"), expires_at=expires_at
)
return JsonResponse(
{
"short_link": short_link.build_url(),
"expires_at": expires_at.isoformat(),
},
status=201,
) | 2,469 |
def get_mapped_tracks_file(mode='r', **kwargs):
""" Returns a file descriptor-like object to the file containing the
raw track.
(A mapped track is a collection of tuples). Each tuple is:
- linking pairs
- paths
- linking pairs
- points
Arguments:
mode: r/w mode
driver_id: string, the id of the driver
"""
fname = get_data_dir() + Template(mapped_tracks_tpl).substitute(**kwargs)
return gzip.open(fname, mode) | 2,470 |
def edit_comment(comment_entity, new_comment_text):
"""Edits a comment on a blog entry"""
comment_entity.comment = new_comment_text
comment_entity.put() | 2,471 |
def ne_2beta(r, ne0, rc_outer, beta_outer, f_inner, rc_inner, beta_inner):
"""
Electron number density [cm^-3] in the double-beta profile of the hydrostratic equilibrium model.
r : distance from the center of the cluster [kpc]
ne0 : central electron number density [cm^-3]
rc_outer : core radius from the outer component [kpc] (default: 100.)
beta_outer : slope from the outer component (default: 1.)
f_inner : fractional contribution from inner component (default: 0.)
rc_inner : core radius from the inner component [kpc] (default: 10.)
beta_inner : slope from the inner component (default: 1.)
"""
    def outer(rr):  # outer contribution
        return (1. + rr**2./rc_outer**2.)**(-1.5*beta_outer)
    def inner(rr):  # inner contribution
        return (1. + rr**2./rc_inner**2.)**(-1.5*beta_inner)
return ne0*(f_inner*inner(r) + (1.-f_inner)*outer(r)) | 2,472 |
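# An illustrative evaluation of ne_2beta (assumptions: numpy is imported as np and the
# function above is in scope; the parameter values below are arbitrary):
radii = np.array([10., 100., 1000.])  # kpc
print(ne_2beta(radii, ne0=1e-2, rc_outer=100., beta_outer=1.,
               f_inner=0.2, rc_inner=10., beta_inner=1.))
# the density equals ne0 at r = 0 and falls off with radius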
def create_tables():
"""Recreate the database tables."""
from untracked_config.development_node import ON_DEV_NODE
lg.debug('Rebuilding database tables.')
# this section is to remove the old database table if the DefectModel table needs to be changed:
if ON_DEV_NODE:
fsa.drop_all() # TODO: this is for model/table development only and SHOULD NOT be used with production
# this ensures there is a table there
fsa.create_all() | 2,473 |
def fast_fdividef(x, y):
"""
See https://docs.nvidia.com/cuda/libdevice-users-guide/__nv_fast_fdividef.html
:param x: Argument.
:type x: float32
:param y: Argument.
:type y: float32
:rtype: float32
""" | 2,474 |
def test_that_all_entries_dicts_match(load_template_test):
"""Test that the values of the dicts returned are the same as the control.
This test swaps place with the processed and reference variable so it's not run
in the same order as test_correct_data_in_entries to catch dicts with extra keys
This will create a test for each of the files in the test_collection
variable.
"""
processed, reference = raw_template_test(load_template_test)
# Can be uncommented if we don't care that the parsed data isn't
# in the same order as the raw data
# reference = sorted(reference)
# processed = sorted(processed)
for i in range(len(processed)):
assert processed[i] == reference[i] | 2,475 |
def sell():
"""Sell shares of stock"""
"""Sell shares of stock"""
if request.method == "POST":
symbol = request.form.get("symbol")
amount = request.form.get("shares")
try:
amount = int(amount)
        except (TypeError, ValueError):
return apology("enter a proper value")
print(amount)
if not symbol:
return apology("Missing stock symbol!")
elif not amount:
return apology("Missing number of shares!")
        elif amount <= 0:
return apology("enter a proper value")
else:
amount = int(amount)
stock = lookup(symbol)
if stock is None:
return apology("invalid symbol")
rows = db.execute("""
SELECT symbol,SUM(shares) as totalShares
FROM history
WHERE user_id=:user_id
GROUP BY symbol
HAVING totalShares >0;"""
,user_id=session["user_id"])
for row in rows:
if row["symbol"] == symbol:
if amount >row["totalShares"]:
return apology("too many shares")
price = float(stock["price"])
rows = db.execute("SELECT cash FROM users WHERE id=:id",id=session["user_id"])
cash = rows[0]["cash"]
updated_cash = cash + amount * price
db.execute("UPDATE users SET cash=:updated_cash WHERE id=:id",updated_cash=updated_cash,id=session["user_id"])
db.execute("INSERT INTO history (user_id,stock_name,shares,price,symbol) VALUES (:user_id,:stock_name,:shares,:price,:symbol)",user_id=session["user_id"],stock_name=stock['name'],shares= -1*int(amount),price=stock['price'],symbol=symbol)
flash("Sold!!")
return redirect("/stock")
else:
rows = db.execute("""
SELECT symbol FROM history WHERE user_id=:user_id GROUP BY symbol HAVING SUM(shares) >0;
""", user_id=session["user_id"])
return render_template("sellstock.html", symbols = [row["symbol"] for row in rows]) | 2,476 |
def log_error(e):
"""
It is always a good idea to log errors.
This function just prints them, but you can
make it do anything.
"""
print(e) | 2,477 |
def print_sentiments(sentences):
"""Given a list of sentences, prints sentiment information"""
for sentence in sentences:
print(sentence)
ss = sid.polarity_scores(sentence)
for k in sorted(ss):
print('{0}: {1}, '.format(k, ss[k]), end='')
print() | 2,478 |
def config_prime(input_file, output_file, \
index_first_dynamic_arg, num_dynamic_args, \
spec_only_globals):
"""
Execute the program until a branch condition is unknown.
index_first_dynamic_arg is a number starting at 1.
num_dynamic_args is a non-negative number.
"""
## TODOX: find subset of -O1 that simplify loops for dominance queries
args = ['-O1'] # '-loop-simplify', '-simplifycfg'
args += ['-Pconfig-prime']
# index = 0
# for x in known_args:
# if index == 0:
# args.append('-Pconfig-prime-file=\"{0}\"'.format(x))
# else:
# args.append('-Pconfig-prime-input-arg=\"{0}\"'.format(x))
# index += 1
args.append('-Pconfig-prime-index-first-unknown-arg={0}'.format(index_first_dynamic_arg))
args.append('-Pconfig-prime-unknown-args={0}'.format(num_dynamic_args))
if spec_only_globals:
args.append('-Pconfig-prime-specialize-only-globals=true')
else:
args.append('-Pconfig-prime-specialize-only-globals=false')
###----------------------------------------------------------------###
## https://code.woboq.org/userspace/glibc/posix/getopt.c.html#58
###----------------------------------------------------------------###
## If the environment variable POSIXLY_CORRECT is not set then
    ## getopt will permute argv's elements so that all the ones
    ## starting with '-' come first, but only if you use the glibc
    ## version. That is, if you call getopt directly (e.g.,
## if the program is linked with musllvm) then getopt does not
## permute elements. The permutation must be disabled because it
## will go through the dynamic arguments and check if it starts
## with `-`. However, the dynamic arguments cannot be accessed
## during the configuration priming because they are undefined at
## that time.
key = 'POSIXLY_CORRECT'
# remember value
old_val = os.getenv(key)
# set the environment variable to any value
os.environ[key] = '1'
# call configuration priming
driver.previrt(input_file, output_file, args)
    ## Restore the old value of the environment variable
    if old_val is None:
        # deleting from os.environ also unsets the real environment variable;
        # os.unsetenv alone would leave the os.environ mapping stale
        del os.environ[key]
else:
os.environ[key] = old_val | 2,479 |
def assemble_block(n_rows: Int, n_cols: Int, pdf: pd.DataFrame, cov_matrix: NDArray[(Any, Any),
Float],
row_mask: NDArray[Any]) -> NDArray[Float]:
"""
Creates a dense n_rows by n_cols matrix from the array of either sparse or dense vectors in the Pandas DataFrame
corresponding to a group. This matrix represents a block.
Args:
n_rows : The number of rows in the resulting matrix
n_cols : The number of columns in the resulting matrix
pdf : Pandas DataFrame corresponding to a group
cov_matrix: 2D numpy array representing covariate columns that should be prepended to matrix X from the block. Can be
empty if covariates are not being applied.
row_mask: 1D numpy array of size n_rows containing booleans used to mask rows from the block X before
return.
Returns:
Dense n_rows - n_masked by n_columns matrix where the columns have been 0-centered and standard scaled.
"""
mu = pdf['mu'].to_numpy()
sig = pdf['sig'].to_numpy()
if 0 in sig:
        raise ValueError('Standard deviation cannot be 0.')
if row_mask.size == 0:
row_mask = np.full(n_rows, True)
if 'indices' not in pdf.columns:
X_raw = np.column_stack(pdf['values'].array)
else:
X_raw = np.zeros([n_rows, n_cols])
for column, row in enumerate(pdf[['indices', 'values']].itertuples()):
X_raw[row.indices, column] = row.values
X = ((X_raw - mu) / sig)
if cov_matrix.any():
return np.column_stack((cov_matrix, X))[row_mask, :]
else:
return X[row_mask, :] | 2,480 |
def calc_xixj_from_braggphi(
det_cent=None,
det_nout=None, det_ei=None, det_ej=None,
det_outline=None,
summit=None, nout=None, e1=None, e2=None,
bragg=None, phi=None,
option=None, strict=None,
):
""" Several options for shapes
de_cent, det_nout, det_ei and det_ej are always of shape (3,)
option:
0:
(summit, e1, e2).shape = (3,)
(bragg, phi).shape = (nbragg,)
=> (xi, xj).shape = (nbragg,)
1:
(summit, e1, e2).shape = (3, nlamb, npts, nbragg)
(bragg, phi).shape = (nlamb, npts, nbragg)
=> (xi, xj).shape = (nlamb, npts, nbragg)
"""
# check inputs
if strict is None:
strict = True
# Check option
gdet = [det_cent, det_nout, det_ei, det_ej]
g0 = [summit, nout, e1, e2]
g1 = [bragg, phi]
# check nbroadcastable
_are_broadcastable(bragg=bragg, phi=phi)
assert all([gg.shape == (3,) for gg in gdet]), "gdet no broadcast!"
assert all([gg.shape == g0[0].shape for gg in g0]), "g0 no broadcast!"
lc = [
g0[0].size == 3 and g1[0].ndim == 1,
g0[0].ndim in [4, 5] and g0[0].shape[0] == 3
and phi.shape == g0[0].shape[1:],
]
if np.sum(lc) == 0:
lstr = [
'\t- {}: {}'.format(kk, vv.shape)
for kk, vv in [
('summit', summit), ('nout', nout), ('e1', e1), ('e2', e2),
('bragg', bragg), ('phi', phi),
]
]
msg = (
"Please provide either:\n"
+ "\t- option 0:\n"
+ "\t\t- (summit, nout, e1, e2).shape[0] = 3\n"
+ "\t\t- (bragg, phi).ndim = 1\n"
+ "\t- option 1:\n"
+ "\t\t- (summit, nout, e1, e2).ndim in [4, 5]\n"
+ "\t\t- (bragg, phi).shape[0] = 3\n\n"
+ "You provided:\n"
+ "\n".join(lstr)
)
raise Exception(msg)
elif all(lc):
msg = ("Multiple options!")
raise Exception(msg)
if option is None:
option = lc.index(True)
assert (lc[0] and option == 0) or (lc[1] and option == 1)
if option == 0:
summit = summit.ravel()
nout, e1, e2 = nout.ravel(), e1.ravel(), e2.ravel()
det_cent = det_cent[:, None]
det_nout = det_nout[:, None]
det_ei, det_ej = det_ei[:, None], det_ej[:, None]
summit, nout = summit[:, None], nout[:, None],
e1, e2 = e1[:, None], e2[:, None]
else:
det_cent = det_cent[:, None, None, None]
det_nout = det_nout[:, None, None, None]
det_ei = det_ei[:, None, None, None]
det_ej = det_ej[:, None, None, None]
if g0[0].ndim == 5:
det_cent = det_cent[..., None]
det_nout = det_nout[..., None]
det_ei = det_ei[..., None]
det_ej = det_ej[..., None]
# Not necessary for broadcasting (last dims first)
# bragg = bragg[None, ...]
# phi = phi[None, ...]
# Compute
vect = (
-np.sin(bragg)*nout
+ np.cos(bragg)*(np.cos(phi)*e1 + np.sin(phi)*e2)
)
k = np.sum(
(det_cent-summit)*det_nout, axis=0
) / np.sum(vect*det_nout, axis=0)
pts = summit + k[None, ...]*vect
xi = np.sum((pts - det_cent)*det_ei, axis=0)
xj = np.sum((pts - det_cent)*det_ej, axis=0)
# Optional: eliminate points outside the det outline
if det_outline is not None and strict is True:
ind = (
(xi < np.min(det_outline[0, :]))
| (xi > np.max(det_outline[0, :]))
| (xj < np.min(det_outline[1, :]))
| (xj > np.max(det_outline[1, :]))
)
xi[ind] = np.nan
xj[ind] = np.nan
return xi, xj, strict | 2,481 |
def test_null_desc():
"""
The default descriptor is not actually defined, but should be NULL
"""
default = descriptor.build()
assert default == ffi.NULL | 2,482 |
def _check_index_good(X):
"""Check the index of X and return boolean."""
# check the first index elements for "__total"
tot_chk = np.any(X.index.get_level_values(level=0).isin(["__total"]))
return tot_chk | 2,483 |
def seguimientos_list_csv(request, codigo):
"""Lista todos los eventos de seguimiento para cada proyecto de ley.
---
type:
codigo:
required: true
type: string
parameters:
- name: codigo
        description: bill code including the legislature, for example 00002-2011
type: string
paramType: path
required: true
"""
codigo, legislatura = split_code_input(codigo)
try:
proy = Proyecto.objects.get(
codigo=codigo,
legislatura=legislatura,
)
except Proyecto.DoesNotExist:
msg = 'error,proyecto no existe'
return HttpResponse(msg, content_type='text/csv')
seguimientos = get_seguimientos_from_proyecto_id(proy.id)
seguimientos.append({
'headline': 'Fecha de presentación',
'startDate': convert_date_to_string(proy.fecha_presentacion),
})
proyecto = "Proyecto No: " + str(proy.numero_proyecto).replace("/", "_")
data = []
for i in seguimientos:
data.append({
'proyecto': proyecto,
'headline': i['headline'],
'startDate': i['startDate'].replace(',', '-'),
})
if request.method == 'GET':
return CSVResponse(data) | 2,484 |
def endpoint_error(estimate, ground_truth):
"""Computes the average end-point error of the optical flow estimates."""
error = torch.norm(
estimate - ground_truth[:, :2, :, :], 2, 1, keepdim=False)
if ground_truth.size(1) == 3:
mask = (ground_truth[:, 2, :, :] > 0).float()
else:
mask = torch.ones_like(error)
epe = error * mask
epe = torch.sum(epe, (1, 2)) / torch.sum(mask, (1, 2))
return epe.mean().reshape(1) | 2,485 |
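# A tiny smoke test for endpoint_error (assumptions: torch is imported and the function
# above is in scope). The estimate equals the ground truth, so the mean EPE is zero.
flow = torch.ones(2, 2, 4, 4)       # batch of 2, (u, v) channels, 4x4 grid
print(endpoint_error(flow, flow))   # -> tensor([0.])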
def output(log_message=None, success_message=None,
fail_message=None):
"""This is a decorator to trap the typical exceptions that occur
when applying and removing modules. It returns the proper output
corresponding to the error messages automatically. If the function
returns output (success_flag, message) then those are returned,
otherwise success is assumed and the success_message returned.
Using this removes a lot of potential boiler-plate code, however
it is not necessary.
Keyword arguments can be used in the message string. Default
values can be found in the message_args @property, however a
driver can add whatever it see fit, by setting message_args
to a dict in the configure call (see above). Thus if you set
self.message_args = {'my_key': 'my_key_val'} then the message
string could look like "My key is '$(my_key)s'".
"""
success_message = success_message or "Success"
fail_message = fail_message or "Fail"
def output_decorator(func):
"""This is the actual decorator."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
"""Here's where we handle the error messages and return values
from the actual function.
"""
log_msg = log_message
success_msg = success_message
fail_msg = fail_message
if isinstance(args[0], ModuleDriver):
# Try and insert any message args if they exist in the driver
message_args = args[0].message_args
if message_args:
try:
log_msg = log_msg % message_args
success_msg = success_msg % message_args
fail_msg = fail_msg % message_args
except Exception:
# if there's a problem, just log it and drive on
LOG.warning(_("Could not apply message args: %s") %
message_args)
pass
if log_msg:
LOG.info(log_msg)
success = False
try:
rv = func(*args, **kwargs)
if rv:
# Use the actual values, if there are some
success, message = rv
else:
success = True
message = success_msg
except exception.ProcessExecutionError as ex:
message = (_("%(msg)s: %(err)s") %
{'msg': fail_msg, 'err': ex.stderr})
LOG.exception(message)
except exception.TroveError as ex:
message = (_("%(msg)s: %(err)s") %
{'msg': fail_msg, 'err': ex._error_string})
LOG.exception(message)
except Exception as ex:
message = (_("%(msg)s: %(err)s") %
                           {'msg': fail_msg, 'err': str(ex)})  # Exception has no .message in Python 3
LOG.exception(message)
return success, message
return wrapper
return output_decorator | 2,486 |
def custom_mape(approxes, targets):
"""Competition metric is a slight variant on MAPE."""
nominator = np.abs(np.subtract(approxes, targets))
denominator = np.maximum(np.abs(targets), 290000)
return np.mean(nominator / denominator) | 2,487 |
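# A quick check of custom_mape (assumptions: numpy is imported as np and the function
# above is in scope). The denominator is clipped at 290000, so small targets do not
# inflate the error.
targets = np.array([100000., 500000.])
approxes = np.array([129000., 550000.])
print(custom_mape(approxes, targets))  # -> 0.1, i.e. mean(29000/290000, 50000/500000)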
def article_text_to_dict(article_text: str):
"""
Translates an article text into a dict.
"""
data = collections.defaultdict(list)
field = ''
for line in re.split(r'\n+', article_text):
# Fix little bug with isi files
if line.startswith('null'):
line = line[4:]
name = line[:2]
value = line[3:]
if not name.isspace():
field = name
if not field.isspace() and field != 'ER':
data[field].append(value)
return dict(data) | 2,488 |
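# A minimal example for article_text_to_dict (assumes the function above is in scope).
# Two-letter ISI field tags start each line; continuation lines keep the current tag
# and the terminating 'ER' tag is dropped.
sample = "PT J\nAU Smith, A\n   Jones, B\nTI A short title\nER"
print(article_text_to_dict(sample))
# -> {'PT': ['J'], 'AU': ['Smith, A', 'Jones, B'], 'TI': ['A short title']}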
def add_product(basket, price=None, quantity=1, product=None):
"""
Helper to add a product to the basket.
"""
if not hasattr(basket, 'strategy'):
basket.strategy = strategy.Default()
if price is None:
price = D('1')
if product and product.has_stockrecords:
record = product.stockrecords.all()[0]
else:
record = factories.create_stockrecord(
product=product, price_excl_tax=price,
num_in_stock=quantity + 1)
basket.add_product(record.product, quantity) | 2,489 |
def frohner_cor_3rd_order(sig1,sig2,sig3,n1,n2,n3):
"""
Takes cross-sections [barns] and atom densities [atoms/barn] for
three thicknesses of the same sample, and returns extrapolated
cross section according to Frohner.
Parameters
----------
sig1 : array_like
Cross section of the thinnest of the three samples.
sig2 : array_like
Cross section of the mid-thickness of the three samples.
sig3 : array_like
Cross section of the thickest of the three samples.
n1 : float
Atom density of the thinnest sample
n2 : float
Atom density of the mid-thickness sample
n3 : float
Atom density of the thickest sample
Returns
-------
sig0 : array_like
The extrapolated cross section from sig1, sig2, and sig3
"""
# two terms in the numerator
numer1 = (n1*sig2-n2*sig1)*(n3**2-n1**2-(n1-n3)/(n1-n2)*(n2**2-n1**2))
numer2 = (n1*n2**2-n1**2*n2)*(sig3-sig2-(n1-n3)/(n1-n2)*(sig2-sig1))
denom = (n1-n2)*(n3**2-n1**2) - (n1-n3)*(n2**2-n1**2)
return (numer1-numer2)/denom | 2,490 |
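# A numerical sanity check for frohner_cor_3rd_order (assumes the function above is in
# scope). If the measured cross sections are already thickness independent, the
# extrapolation should reproduce the same value.
print(frohner_cor_3rd_order(10.0, 10.0, 10.0, 0.01, 0.02, 0.04))  # -> 10.0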
def nodeInTree(root, k):
"""
Checks if the node exists in the tree or not
"""
    if root is None:
return False
if root.data == k or nodeInTree(root.left, k) or nodeInTree(root.right, k):
return True
return False | 2,491 |
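# A short demonstration of nodeInTree (an assumption: a simple Node class with
# data/left/right attributes, since the snippet does not define one).
class Node:
    def __init__(self, data, left=None, right=None):
        self.data, self.left, self.right = data, left, right

root = Node(1, Node(2), Node(3, Node(4)))
print(nodeInTree(root, 4))  # -> True
print(nodeInTree(root, 5))  # -> False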
def read_xvec(path, base_type, N=-1):
"""
A utility function to read YAEL format files
(xxx.ivec, xxx.fvec, and xxx.bvec)
:param path: The path of xvec file
:param base_type: The type of xvec; 'f', 'i' or 'b'
:param N: The number of vectors to be read. If this is not specified, read all
:return: a N x dim array
"""
import struct # for handling binary data
import os
assert(base_type == 'f' or base_type == 'i' or base_type == 'b')
if base_type == "b": # Convert 'b' to 'B' because unsinged char is 'B' for struct
base_type = 'B'
data_type, byte_size = {
'f': (np.float32, 4),
'i': (np.int32, 4),
'B': (np.uint8, 1)
}[base_type]
size = os.path.getsize(path)
with open(path, 'rb') as fin:
bdata = fin.read(4) # the first 4 byte is a number of dimension
dim = struct.unpack('i', bdata)[0]
N_all = int(size / (4 + dim * byte_size)) # All size
if N == -1:
N = N_all
assert(N <= N_all)
fin.seek(0) # Set cursor to the initial position
vec = np.empty((N, dim), dtype=data_type)
for n in range(N):
            bdata = fin.read(4)  # the first 4 bytes are always dim, so skip them
bdata = fin.read(byte_size * dim) # Read a vector
vec[n, :] = np.array(struct.unpack(base_type * dim, bdata), dtype=data_type)
return vec | 2,492 |
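# A round-trip sketch for read_xvec (assumptions: numpy is imported as np, the function
# above is in scope, and the file name is arbitrary). Each xvec record is an int32
# dimension followed by dim values of the base type.
import struct
vecs = np.arange(6, dtype=np.float32).reshape(2, 3)
with open('tiny.fvec', 'wb') as fout:
    for v in vecs:
        fout.write(struct.pack('i', len(v)) + v.tobytes())
print(read_xvec('tiny.fvec', 'f'))  # recovers the original 2 x 3 array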
def make_dir(path):
"""Make directory.
Args:
path (str): absolute path for directory
Raise error if error other than directory exists occurs.
"""
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise | 2,493 |
def test_init(app):
"""Test initialisation."""
auth = OktaOAuth(app)
assert auth.app is app | 2,494 |
def split_and_filter(intermediate_str, splitter):
"""
    Split string with given splitter - practically either "," or "'".
    Then filter the ones that include "https" in the split pieces
:param intermediate_str : string that in the middle of parsing
:param splitter
:return: chunk of string(s) as a list
"""
intermediate_split = intermediate_str.split(splitter)
intermediate_filter = [elem for elem in intermediate_split
if 'https' in elem]
return intermediate_filter[0] | 2,495 |
def test_hrf_deriv_timing():
"""Test some timing aspects of the HRF and its derivative."""
hrf = glm.GammaDifferenceHRF(temporal_deriv=True, oversampling=100)
y, dy = hrf.kernel.T
nt.assert_greater(np.argmax(y), np.argmax(dy))
npt.assert_almost_equal(dy[np.argmax(y)], 0, 4) | 2,496 |
def main(n, n_boulders, b_down, state_to_index, index_to_state, boulder_positions, state_reward, full_states):
"""Finds and saves optimal policy and values to json file.
Parameters:
n: number of rows/columns
b_down: possible next boulder states
state_to_index/index_to_state: dictionaries to convert states to index
boulder_positions: possible boulder states
state_reward: reward of moving to state
full_states: all possible states given n and n_boulders
Returns: None
"""
    actions = [0, 1, 2]
    state_down = {}
    full_state_to_index = {}
    nS = len(full_states)
    nA = len(actions)
# gen conversion dictionaries
for k,v in b_down.items():
vals = [index_to_state[a] for a in v]
state_down[index_to_state[k]] = vals
for s in range(len(full_states)):
full_state_to_index[full_states[s]] = s
# create P
P=[]
for state in full_states:
action_list=[]
for a in actions:
action_list.append(movement_tuples(state,a, n, state_down, boulder_positions, state_reward))
P.append(action_list)
policy, v = policy_improvement(nS, nA, P, full_state_to_index, g = .5)
policy_actions = [int(np.argmax(p)) for p in policy]
data = {'Policy':policy_actions, 'Values':v.tolist()}
with open('PolicyIterationResults/data{}_{}.json'.format(n,n_boulders), 'w') as f:
json.dump(data, f)
print(policy_actions.count(0),policy_actions.count(1),policy_actions.count(2)) | 2,497 |
def organism_code(genus, species):
"""Return code from genus and species."""
return (
f"{genus[:GENUS_CODE_LEN].lower()}{species[:SPECIES_CODE_LEN].lower()}"
) | 2,498 |
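# Example call for organism_code (an assumption: GENUS_CODE_LEN and SPECIES_CODE_LEN
# are module-level constants not shown in the snippet; 3 and 3 are used here only
# for illustration).
GENUS_CODE_LEN, SPECIES_CODE_LEN = 3, 3
print(organism_code("Homo", "sapiens"))  # -> "homsap"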
def verify_image(filename):
"""Verifies whether the file exists"""
image_extensions = ['tif', 'jpg', 'gif', 'png', 'jpeg']
if type(filename) is str:
extension = filename.split('.')
if len(extension) == 2:
if extension[1].lower() in image_extensions:
return os.path.isfile(filename)
return False | 2,499 |