content (string, lengths 22-815k) | id (int64, 0-4.91M)
---|---|
def api(default=None, api=None, **kwargs):
"""Returns the api instance in which this API function is being ran"""
return api or default | 4,600 |
def parse_json(data):
"""Parses the PupleAir JSON file, returning a Sensors protobuf."""
channel_a = []
channel_b = {}
for result in data["results"]:
if "ParentID" in result:
channel_b[result["ParentID"]] = result
else:
channel_a.append(result)
sensors = list(_parse_results(channel_a, channel_b))
return model_pb2.Sensors(sensors=sensors) | 4,601 |
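# Usage sketch (assumption): a minimal payload in the shape parse_json expects.
# Only "results" and "ParentID" come from the function above; the other field
# names are placeholders, not the real PurpleAir schema.
data = {
    "results": [
        {"ID": 1001, "Label": "Roof A"},                      # channel A sensor
        {"ID": 1002, "Label": "Roof A B", "ParentID": 1001},  # channel B child
    ]
}
sensors_pb = parse_json(data)  # model_pb2.Sensors built by _parse_results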
def asynchronize_tornado_handler(handler_class):
"""
A helper function to turn a blocking handler into an async call
    :param handler_class: a tornado RequestHandler which should be made asynchronous
:return: a class which does the same work on a threadpool
"""
class AsyncTornadoHelper(handler_class):
"""
A hollow wrapper class which runs requests asynchronously on a threadpool
"""
def _do_work_and_report_error(self, work):
try:
# call the "real" method from the handler_class
work()
except HTTPError as ex:
# request handler threw uncaught error
logging.error(traceback.format_exc())
# send http errors to client
self.write(str(ex))
self.set_status(ex.status_code)
except Exception:
# request handler threw uncaught error
logging.error(traceback.format_exc())
# send 500 error to client. Do not pass on error message
self.write("500 Internal Server Error \n")
self.set_status(500)
finally:
# finished needs to be reported from main tornado thread
tornado.ioloop.IOLoop.instance().add_callback(
# report finished to main tornado thread:
lambda: self.finish()
)
@asynchronous
def get(self, path=None):
# bind the "real" method from the handler_class to run in another thread
blocking_method = lambda: self._do_work_and_report_error(
lambda: handler_class.get(self, path))
# launch in another thread
REQUEST_HANDLER_THREAD_POOL.run_as_asynch(blocking_method)
@asynchronous
def put(self, path=None):
# bind the "real" method from the handler_class to run in another thread
blocking_method = lambda: self._do_work_and_report_error(
lambda: handler_class.put(self, path))
# launch in another thread
REQUEST_HANDLER_THREAD_POOL.run_as_asynch(blocking_method)
@asynchronous
def post(self, path=None):
# bind the "real" method from the handler_class to run in another thread
blocking_method = lambda: self._do_work_and_report_error(
lambda: handler_class.post(self, path))
# launch in another thread
REQUEST_HANDLER_THREAD_POOL.run_as_asynch(blocking_method)
# return the wrapped class instead of the original for Tornado to run asynchronously
return AsyncTornadoHelper | 4,602 |
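# Usage sketch (assumption, not from the original module): wrap a blocking
# RequestHandler so its get() runs on the thread pool defined above.
import tornado.web

class BlockingReportHandler(tornado.web.RequestHandler):
    def get(self, path=None):
        # generate_report_synchronously is a hypothetical slow, blocking call
        self.write(generate_report_synchronously(path))

application = tornado.web.Application([
    # the wrapped class is handed to Tornado instead of the raw handler
    (r"/report/(.*)", asynchronize_tornado_handler(BlockingReportHandler)),
])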
def check_regex(regexstring):
""" method to check that the regex works"""
if regexstring is not None:
try:
compiledregex = re.compile(regexstring, flags=re.IGNORECASE)
# result = compiledregex.match(string)
except:
raise click.BadOptionUsage("The regex didn't compile. For correct usage"
" see: https://docs.python.org/2/library/re.html") | 4,603 |
def convert_single_example(ex_index, example: InputExample,
tokenizer, label_map, dict_builder=None):
"""Converts a single `InputExample` into a single `InputFeatures`."""
# label_map = {"B": 0, "M": 1, "E": 2, "S": 3}
# tokens_raw = tokenizer.tokenize(example.text)
tokens_raw = list(example.text)
labels_raw = example.labels
# Account for [CLS] and [SEP] with "- 2"
# The convention in BERT is:
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
    # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
label_ids = []
for token, label in zip(tokens_raw, labels_raw):
tokens.append(token)
label_ids.append(label_map[label])
input_ids = tokenizer.convert_tokens_to_ids(tokens)
if dict_builder is None:
input_dicts = np.zeros_like(tokens_raw, dtype=np.int64)
else:
input_dicts = dict_builder.extract(tokens)
seq_length = len(tokens)
assert seq_length == len(input_ids)
assert seq_length == len(input_dicts)
assert seq_length == len(label_ids)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
if ex_index < 1:
tf.logging.info("*** Example ***")
tf.logging.info("guid: %s" % example.guid)
tf.logging.info("tokens: %s" % " ".join(
[utils.printable_text(x) for x in tokens]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_dicts]))
tf.logging.info("labels: %s" % " ".join([str(x) for x in example.labels]))
tf.logging.info("labels_ids: %s" % " ".join([str(x) for x in label_ids]))
feature = InputFeatures(
input_ids=input_ids,
input_dicts=input_dicts,
label_ids=label_ids,
seq_length=seq_length)
return feature | 4,604 |
def create_scenariolog(sdata, path, recording, logfilepath):
"""
シナリオのプレイデータを記録したXMLファイルを作成する。
"""
element = cw.data.make_element("ScenarioLog")
# Property
e_prop = cw.data.make_element("Property")
element.append(e_prop)
e = cw.data.make_element("Name", sdata.name)
e_prop.append(e)
e = cw.data.make_element("WsnPath", sdata.fpath)
e_prop.append(e)
e = cw.data.make_element("RoundAutoStart", str(sdata.autostart_round))
e_prop.append(e)
e = cw.data.make_element("NoticeInfoView", str(sdata.notice_infoview))
e_prop.append(e)
e = cw.data.make_element("PartyEnvironment")
if not sdata.party_environment_backpack:
e.append(cw.data.make_element("Backpack", "Disable"))
if len(e):
e_prop.append(e)
if cw.cwpy.setting.write_playlog:
e = cw.data.make_element("LogFile", logfilepath)
e_prop.append(e)
if cw.cwpy.areaid >= 0:
areaid = cw.cwpy.areaid
elif cw.cwpy.pre_areaids:
areaid = cw.cwpy.pre_areaids[0][0]
else:
areaid = 0
if not recording:
e = cw.data.make_element("Debug", str(cw.cwpy.debug))
e_prop.append(e)
e = cw.data.make_element("AreaId", str(areaid))
e_prop.append(e)
e_music = cw.data.make_element("MusicPaths")
for i, music in enumerate(cw.cwpy.music):
if music.path.startswith(cw.cwpy.skindir):
fpath = music.path.replace(cw.cwpy.skindir + "/", "", 1)
else:
fpath = music.path.replace(sdata.scedir + "/", "", 1)
e = cw.data.make_element("MusicPath", fpath, attrs={"channel": str(music.channel),
"volume": str(music.subvolume),
"loopcount": str(music.loopcount),
"inusecard": str(music.inusecard)})
e_music.append(e)
e_prop.append(e_music)
e = cw.data.make_element("Yado", cw.cwpy.ydata.name)
e_prop.append(e)
e = cw.data.make_element("Party", cw.cwpy.ydata.party.name)
e_prop.append(e)
# bgimages
e_bgimgs = cw.data.make_element("BgImages")
element.append(e_bgimgs)
def make_colorelement(name, color):
e = cw.data.make_element(name, attrs={"r": str(color[0]),
"g": str(color[1]),
"b": str(color[2])})
if 4 <= len(color):
e.set("a", str(color[3]))
else:
e.set("a", "255")
return e
for bgtype, d in cw.cwpy.background.bgs:
if bgtype == cw.sprite.background.BG_IMAGE:
fpath, inusecard, scaledimage, mask, smoothing, size, pos, flag, visible, layer, cellname = d
attrs = {"mask": str(mask), "visible": str(visible)}
if cellname:
attrs["cellname"] = cellname
            if smoothing != "Default":
attrs["smoothing"] = smoothing
e_bgimg = cw.data.make_element("BgImage", attrs=attrs)
if inusecard:
e = cw.data.make_element("ImagePath", fpath, attrs={"inusecard":str(inusecard),
"scaledimage": str(scaledimage)})
else:
e = cw.data.make_element("ImagePath", fpath)
e_bgimg.append(e)
elif bgtype == cw.sprite.background.BG_TEXT:
text, namelist, face, tsize, color, bold, italic, underline, strike, vertical, antialias,\
btype, bcolor, bwidth, loaded, updatetype, size, pos, flag, visible, layer, cellname = d
attrs = {"visible": str(visible),
"loaded": str(loaded)}
if cellname:
attrs["cellname"] = cellname
e_bgimg = cw.data.make_element("TextCell", attrs=attrs)
e = cw.data.make_element("Text", text)
e_bgimg.append(e)
e = cw.data.make_element("Font", face, attrs={"size": str(tsize),
"bold": str(bold),
"italic": str(italic),
"underline": str(underline),
"strike": str(strike)})
e_bgimg.append(e)
e = cw.data.make_element("Vertical", str(vertical))
e_bgimg.append(e)
e = cw.data.make_element("Antialias", str(antialias))
e_bgimg.append(e)
e = make_colorelement("Color", color)
e_bgimg.append(e)
e = cw.data.make_element("UpdateType", updatetype)
e_bgimg.append(e)
            if btype != "None":
e = cw.data.make_element("Bordering", attrs={"type": btype,
"width": str(bwidth)})
e.append(make_colorelement("Color", bcolor))
e_bgimg.append(e)
if namelist:
e = cw.data.make_element("Names")
for item in namelist:
e_name = cw.data.make_element("Name", unicode(item.name))#PyLite:コモンのnumberがバグるためUnicodeに変換
if isinstance(item.data, cw.data.YadoData):
e_name.set("type", "Yado")
elif isinstance(item.data, cw.data.Party):
e_name.set("type", "Party")
elif isinstance(item.data, cw.character.Player) and item.data in cw.cwpy.get_pcards():
e_name.set("type", "Player")
e_name.set("number", str(cw.cwpy.get_pcards().index(item.data)+1))
elif isinstance(item.data, cw.data.Flag):
e_name.set("type", "Flag")
e_name.set("flag", item.data.name)
elif isinstance(item.data, cw.data.Step):
e_name.set("type", "Step")
e_name.set("step", item.data.name)
elif isinstance(item.data, cw.data.Variant):
e_name.set("type", "Variant")
e_name.set("variant", item.data.name)
e_name.set("valuetype", item.data.type)
elif item.data == "Number":
e_name.set("type", "Number")
e.append(e_name)
e_bgimg.append(e)
elif bgtype == cw.sprite.background.BG_COLOR:
blend, color1, gradient, color2, size, pos, flag, visible, layer, cellname = d
attrs = {"visible": str(visible)}
if cellname:
attrs["cellname"] = cellname
e_bgimg = cw.data.make_element("ColorCell", attrs=attrs)
e = cw.data.make_element("BlendMode", blend)
e_bgimg.append(e)
e = make_colorelement("Color", color1)
e_bgimg.append(e)
            if gradient != "None":
e = cw.data.make_element("Gradient", attrs={"direction": gradient})
e.append(make_colorelement("EndColor", color2))
e_bgimg.append(e)
elif bgtype == cw.sprite.background.BG_PC:
pcnumber, expand, smoothing, size, pos, flag, visible, layer, cellname = d
attrs = {"visible": str(visible),
"expand": str(expand)}
if cellname:
attrs["cellname"] = cellname
            if smoothing != "Default":
attrs["smoothing"] = smoothing
e_bgimg = cw.data.make_element("PCCell", attrs=attrs)
e = cw.data.make_element("PCNumber", str(pcnumber))
e_bgimg.append(e)
else:
assert bgtype == cw.sprite.background.BG_SEPARATOR
e_bgimg = cw.data.make_element("Redisplay")
e_bgimgs.append(e_bgimg)
continue
e = cw.data.make_element("Flag", flag)
e_bgimg.append(e)
e = cw.data.make_element("Location",
attrs={"left": str(pos[0]), "top": str(pos[1])})
e_bgimg.append(e)
e = cw.data.make_element("Size",
attrs={"width": str(size[0]), "height": str(size[1])})
e_bgimg.append(e)
        if layer != cw.LAYER_BACKGROUND:
e = cw.data.make_element("Layer", str(layer))
e_bgimg.append(e)
e_bgimgs.append(e_bgimg)
    # Card repositioning information
if cw.cwpy.sdata.moved_mcards:
e_movedmcards = cw.data.make_element("MovedCards")
for (cardgroup, index), (x, y, scale, layer) in cw.cwpy.sdata.moved_mcards.iteritems():
e_movedmcard = cw.data.make_element("MovedCard", attrs={"cardgroup":cardgroup,
"index":str(index)})
e_movedmcard.append(cw.data.make_element("Location", attrs={"left":str(x),
"top":str(y)}))
            if scale != -1:
e_movedmcard.append(cw.data.make_element("Size", attrs={"scale":str(scale)}))
            if layer != -1:
e_movedmcard.append(cw.data.make_element("Layer", str(layer)))
if len(e_movedmcard):
e_movedmcards.append(e_movedmcard)
element.append(e_movedmcards)
# flag
e_flag = cw.data.make_element("Flags")
element.append(e_flag)
if cw.cwpy.setting.enable_equalbug:
for name, flag in sdata.flags.iteritems():
if name.find(u"=") == -1: # BUG:PyLite :「=」を含む変数はセーブされない(1.50変数バグ)
e = cw.data.make_element("Flag", name, {"value": str(flag.value)})
e_flag.append(e)
else:
for name, flag in sdata.flags.iteritems():
e = cw.data.make_element("Flag", name, {"value": str(flag.value)})
e_flag.append(e)
# step
e_step = cw.data.make_element("Steps")
element.append(e_step)
if cw.cwpy.setting.enable_equalbug:
for name, step in sdata.steps.iteritems():
if name.find(u"=") == -1: # BUG:PyLite :「=」を含む変数はセーブされない(1.50変数バグ)
e = cw.data.make_element("Step", name, {"value": str(step.value)})
e_step.append(e)
else:
for name, step in sdata.steps.iteritems():
e = cw.data.make_element("Step", name, {"value": str(step.value)})
e_step.append(e)
# variant
if sdata.variants:
e_variant = cw.data.make_element("Variants")
element.append(e_variant)
for name, variant in sdata.variants.iteritems():
            # PyLite: avoid UnicodeError
e = cw.data.make_element("Variant", name, {"type": variant.type, "value": unicode(variant.value)})
e_variant.append(e)
if not recording:
# gossip
e_gossip = cw.data.make_element("Gossips")
element.append(e_gossip)
for key, value in sdata.gossips.iteritems():
e = cw.data.make_element("Gossip", key, {"value": str(value)})
e_gossip.append(e)
# completestamps
e_compstamp = cw.data.make_element("CompleteStamps")
element.append(e_compstamp)
for key, value in sdata.compstamps.iteritems():
e = cw.data.make_element("CompleteStamp", key, {"value": str(value)})
e_compstamp.append(e)
# InfoCard
e_info = cw.data.make_element("InfoCards")
element.append(e_info)
for resid in sdata.get_infocards(order=True):
e = cw.data.make_element("InfoCard", str(resid))
e_info.append(e)
# FriendCard
e_cast = cw.data.make_element("CastCards")
element.append(e_cast)
for fcard in sdata.friendcards:
e_cast.append(fcard.data.getroot())
if not recording:
# DeletedFile
e_del = cw.data.make_element("DeletedFiles")
element.append(e_del)
for fpath in sdata.deletedpaths:
e = cw.data.make_element("DeletedFile", fpath)
e_del.append(e)
# LostAdventurer
e_lost = cw.data.make_element("LostAdventurers")
element.append(e_lost)
for fpath in sdata.lostadventurers:
e = cw.data.make_element("LostAdventurer", fpath)
e_lost.append(e)
    # Write the file
etree = cw.data.xml2etree(element=element)
etree.write(path)
return path | 4,605 |
async def converter_self_interaction_target(client, interaction_event):
"""
Internal converter for returning the received interaction event's target. Applicable for context application
commands.
This function is a coroutine.
Parameters
----------
client : ``Client``
The client who received the respective ``InteractionEvent``.
interaction_event : ``InteractionEvent``
The received application command interaction.
Returns
-------
target : `None` or ``DiscordEntity``
The resolved entity if any.
"""
if interaction_event.type is not INTERACTION_TYPE_APPLICATION_COMMAND:
return None
return interaction_event.interaction.target | 4,606 |
def publication_pages(publication_index_page) -> List[PublicationPage]:
"""Fixture providing 10 PublicationPage objects attached to publication_index_page"""
rv = []
    for i in range(10):
        p = _create_publication_page(
            f"Test Publication Page {i}", publication_index_page
)
rv.append(p)
return rv | 4,607 |
def copy_orphans(source_path, albums_paths, out_path):
""" Copies orphan files.
Method iterates over the files in the first level of source_path and detects
    the files that do not exist under any level of albums_paths. These are considered
    orphan files and are copied to out_path. Files that have the same filename as other
    files in albums_paths but possibly have different content are placed in a separate folder.
Keyword arguments:
source_path -- the path that contains the potentially orphan files in its first level
albums_paths -- a list of paths that contain all the media organized in albums
out_path -- the path to copy the orphan files to
"""
# Scan all files in albums_path
start_time = default_timer()
count = 0
album_files_dic = {} # hash -> [filename lowercased, filepath, filesize]
for album_path in albums_paths:
for root, directories, files in os.walk(album_path):
# print('--Dir: ' + root)
for directory in directories:
full_dir = os.path.join(root, directory)
# print(full_dir)
for name in files:
filename, file_extension = os.path.splitext(name)
if file_extension.lower() in ignored_file_extensions:
continue
if name in ignored_files:
continue
count += 1
path = os.path.join(root, name)
album_files_dic[get_id(root, name)] = [name.casefold(), path, os.path.getsize(path)]
# print(os.path.join(root, name))
print('\nTotal number of unique files found in albums path: %d (total files: %d)' % (len(album_files_dic), count))
# Scan files in source_path
files = list(file for file in os.listdir(source_path)
if os.path.isfile(os.path.join(source_path, file))
and file not in ignored_files
and os.path.splitext(file)[1].lower() not in ignored_file_extensions)
print('\nTotal number of files found in source path: %d' % (len(files)))
# Detect orphan files
source_files_dic = {} # hash -> [filename lowercased, filepath, filesize]
for file in files:
path = os.path.join(source_path, file)
source_files_dic[get_id(source_path, file)] = [file.casefold(), path, os.path.getsize(path), ]
orphan_ids = set(source_files_dic.keys()) - album_files_dic.keys()
orphan_files = [source_files_dic[orphan_id] for orphan_id in orphan_ids]
orphan_same_name_files = []
orphan_files2 = []
for [orphan_name, orphan_path, orphan_size] in orphan_files:
found_similar = False
found_same_name = False
for [filename, filepath, size] in album_files_dic.values():
if orphan_name == filename:
found_same_name = True
# Find files with same filename that are similar. These files probably contain the same image data
# and should not be considered orphan files.
if check_for_similar_files and is_similar_file(orphan_path, orphan_size, filepath, size):
found_similar = True
break
if not found_similar:
# Non similar files that have a filename that exists in the albums_path are possibly orphan files, but
# we are not 100% sure. That's why we store them to a different list: orphan_same_name_files and
# copy them to a different folder so that the user can check them out.
if found_same_name:
orphan_same_name_files.append([orphan_name, orphan_path, orphan_size])
else:
orphan_files2.append([orphan_name, orphan_path, orphan_size])
orphan_files = orphan_files2
print('\nTotal number of orphan files found: %d' % len(orphan_files))
print('Total number of orphan files with same filename found: %d' % len(orphan_same_name_files))
print('')
# Copy orphan files to out_path
for file in orphan_files:
filename = os.path.basename(file[1])
if print_files:
print(filename)
src = file[1]
dst = os.path.join(out_path, filename)
shutil.copy2(src, dst)
copy_file_time(src, dst)
if orphan_same_name_files:
if print_files:
print('\n---Same filename---')
out_path2 = os.path.join(out_path, SAME_FILENAME_FOLDER)
if not os.path.exists(out_path2):
os.makedirs(out_path2)
for file in orphan_same_name_files:
filename = os.path.basename(file[1])
if print_files:
print(filename)
src = file[1]
dst = os.path.join(out_path2, filename)
shutil.copy2(src, dst)
copy_file_time(src, dst)
end_time = default_timer()
    print('\nCopying of orphan files to out path finished. Total time: %d seconds' % (end_time - start_time))
return | 4,608 |
def rp_completion(
rp2_metnet,
sink,
rp2paths_compounds,
rp2paths_pathways,
cache: rrCache = None,
upper_flux_bound: float = default_upper_flux_bound,
lower_flux_bound: float = default_lower_flux_bound,
max_subpaths_filter: int = default_max_subpaths_filter,
logger: Logger = getLogger(__name__)
) -> List[rpPathway]:
"""Process to the completion of metabolic pathways
generated by RetroPath2.0 and rp2paths.
(1) rp2paths generates a sets of master pathways which
each of them is a set of chemical transformations.
(2) Each chemical transformation refers to one or
multiple reaction rule.
(3) Each reaction rule comes from one or multiple
template (original) chemical reaction
The completion consists in:
1. exploring all possible metabolic pathways through
steps (2) and (3)
2. putting back chemical species removed during reaction
rules building process
The completion is done for all master pathways of step (1).
Parameters
----------
rp2_metnet: str
Path to the file containing the metabolic network
sink: str
Path to the file containing the list of
species in the sink
rp2paths_compounds: str
Path to the file containing the chemical
species involved in master metabolic pathways
rp2paths_pathways: str
Path to the file containing the master metabolic
pathways
cache: rrCache, optional
Cache that contains reaction rules data
upper_flux_bound: float, optional
Upper flux bound for all new reactions created
(default: default_upper_flux_bound from Args file),
lower_flux_bound: float, optional
Lower flux bound for all new reactions created
(default: default_lower_flux_bound from Args file),
max_subpaths_filter: int, optional
Number of pathways (best) kept per master pathway
(default: 10)
logger: Logger, optional
Returns
-------
List of rpPathway objects
"""
if cache is None:
cache = rrCache(
attrs=[
'rr_reactions',
'template_reactions',
'cid_strc',
'deprecatedCompID_compid',
]
)
## READ
__rp2paths_compounds_in_cache(
infile=rp2paths_compounds,
cache=cache,
logger=logger
)
pathways, transfos = __read_pathways(
infile=rp2paths_pathways,
logger=logger
)
ec_numbers = __read_rp2_metnet(
infile=rp2_metnet,
logger=logger
)
sink_molecules = __read_sink(
infile=sink,
logger=logger
)
# COMPLETE TRANSFORMATIONS
full_transfos = __complete_transformations(
transfos=transfos,
ec_numbers=ec_numbers,
cache=cache,
logger=logger
)
# GENERATE THE COMBINATORY OF SUB-PATHWAYS
# Build pathways over:
# - multiple reaction rules per transformation (TRS) and
# - multiple template reactions per reaction rule
pathway_combinatorics = __build_pathway_combinatorics(
full_transfos,
pathways,
cache=cache,
logger=logger
)
# BUILD + RANK SUB-PATHWAYS
all_pathways = __build_all_pathways(
pathways=pathway_combinatorics,
transfos=full_transfos,
sink_molecules=sink_molecules,
rr_reactions=cache.get('rr_reactions'),
compounds_cache=cache.get('cid_strc'),
max_subpaths_filter=max_subpaths_filter,
# compartment_id=compartment_id,
lower_flux_bound=lower_flux_bound,
upper_flux_bound=upper_flux_bound,
logger=logger
)
# # Return flat list of overall topX pathways
# return sum(
# [
# pathways
# for pathways in all_pathways.values()
# ], [])[:max_subpaths_filter]
return all_pathways
# for sub_pathways in all_pathways.values():
# for sub_pathway in sub_pathways:
# print(sub_pathway)
# from chemlite import Pathway
# print(all_pathways)
# for sub_pathways in all_pathways.values():
# for i in range(len(sub_pathways)):
# for j in range(i+1, len(sub_pathways)):
# if sub_pathways[i] == sub_pathways[j]:
# print(f'Equality between {sub_pathways[i].get_id()} and {sub_pathways[j].get_id()}')
# print()
# print(Pathway._to_dict(all_pathways[1][0])) | 4,609 |
def wrap(func, *args, unsqueeze=False):
"""
Wrap a torch function so it can be called with NumPy arrays.
Input and return types are seamlessly converted.
"""
args = list(args)
for i, arg in enumerate(args):
if type(arg) == np.ndarray:
args[i] = torch.from_numpy(arg)
if unsqueeze:
args[i] = args[i].unsqueeze(0)
result = func(*args)
if isinstance(result, tuple):
result = list(result)
for i, res in enumerate(result):
if type(res) == torch.Tensor:
if unsqueeze:
res = res.squeeze(0)
result[i] = res.numpy()
return tuple(result)
elif type(result) == torch.Tensor:
if unsqueeze:
result = result.squeeze(0)
result = result.numpy()
return result
else:
return result | 4,610 |
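# Usage sketch (assumption): call a Torch function directly on NumPy arrays;
# wrap() converts the inputs to tensors and the result back to an ndarray.
import numpy as np
import torch

a = np.random.rand(3, 4).astype(np.float32)
b = np.random.rand(4, 2).astype(np.float32)
c = wrap(torch.matmul, a, b)  # c is a (3, 2) numpy.ndarray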
def STOCHF(data, fastk_period=5, fastd_period=3, fastd_ma_type=0):
"""
    Stochastic Fast (STOCHF)
:param pd.DataFrame data: pandas DataFrame with open, high, low, close data
:param int fastk_period: period used for K fast indicator calculation
:param int fastd_period: period used for D fast indicator calculation
:param int fastd_ma_type: fast D moving average type (0 simple, 1 exponential)
:return pd.Series: with indicator data calculation results
"""
fn = Function('STOCHF')
return fn(data, fastk_period=fastk_period, fastd_period=fastd_period,
fastd_matype=fastd_ma_type) | 4,611 |
def playerid_reverse_lookup(player_ids, key_type=None):
"""Retrieve a table of player information given a list of player ids
:param player_ids: list of player ids
:type player_ids: list
:param key_type: name of the key type being looked up (one of "mlbam", "retro", "bbref", or "fangraphs")
:type key_type: str
:rtype: :class:`pandas.core.frame.DataFrame`
"""
key_types = ('mlbam', 'retro', 'bbref', 'fangraphs', )
if not key_type:
key_type = key_types[0] # default is "mlbam" if key_type not provided
elif key_type not in key_types:
raise ValueError(
'[Key Type: {}] Invalid; Key Type must be one of "{}"'.format(key_type, '", "'.join(key_types))
)
table = get_lookup_table()
key = 'key_{}'.format(key_type)
results = table[table[key].isin(player_ids)]
results = results.reset_index().drop('index', 1)
return results | 4,612 |
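# Usage sketch (assumption): look up two MLBAM ids; the ids shown here are
# placeholders and the returned columns depend on get_lookup_table().
df = playerid_reverse_lookup([547180, 519317], key_type="mlbam")
print(df.head())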
def test_expense_categories_search(ns_connection):
"""
HOW TO JOIN account's name in advanced search
"""
categories = list(ns_connection.expense_categories.get_all())
ns_connection.client.set_search_preferences(return_search_columns=True)
advanced_categories = list(ns_connection.expense_categories.advanced_search(
100,
('internalId', 'name', 'account', 'subsidiary',)
))
assert len(advanced_categories) == len(categories) | 4,613 |
def write_yaml(data):
""" A function to write YAML file"""
with open('toyaml.yml', 'w') as f:
yaml.dump(data, f) | 4,614 |
def get_delta_z(z, rest_ghz, ghz=None):
"""
Take a measured GHz value, and calculates the restframe GHz value based on the given z of the matched galaxy
:param z:
:param ghz:
:return:
"""
# First step is to convert to nm rom rest frame GHz
set_zs = []
for key, values in transitions.items():
if values[0] - 0.3 <= z <= values[1] + 0.3:
sghz = values[2] * u.GHz # Gets the GHz of the CO line
rest_ghz /= (z+1)
set_z = np.round((sghz - rest_ghz)/ rest_ghz, 3) # (Freq_emitted - Freq_obs)/ Freq_obs = z
set_z = z - set_z
rest_ghz *= (z+1)
print("Z: {} Set Z: {}".format(z, set_z))
set_zs.append((key, set_z))
set_z = np.min([np.abs(i[1]) for i in set_zs])
print(set_zs)
print(set_z)
for element in set_zs:
if np.isclose(np.abs(element[1]),set_z):
return element[1], element[0] | 4,615 |
def hist_orientation(qval, dt):
"""
provided with quats, and time spent* in the direction defined by quat
produces grouped by ra, dec and roll quaternions and corresponding time, spent in quats
params: qval a set of quats stored in scipy.spatial.transfrom.Rotation class
params: dt corresponding to the set of quaternions, set of time intervals duration (which sc spent in the dirrection defined by quaternion)
return: exptime, qval - histogramed set of quaterninons with corresponding times
"""
oruniq, uidx, invidx = hist_quat(qval)
exptime = np.zeros(uidx.size, np.double)
np.add.at(exptime, invidx, dt)
return exptime, qval[uidx] | 4,616 |
def prepare_data(args: argparse.Namespace) -> None:
"""Break one or a list of videos into frames."""
url_root = args.url_root
if args.s3:
url_root = s3_setup(args.s3)
inputs = parse_input_list(args)
logger.info("processing %d video(s) ...", len(inputs))
num_videos = len(inputs)
video_range = range(len(inputs))
quiet = num_videos > 1
if num_videos > 1:
video_range = tqdm(video_range)
jobs = args.jobs
if num_videos >= jobs > 0:
Parallel(n_jobs=jobs, backend="multiprocessing")(
delayed(process_input)(
inputs[i],
args.fps,
args.start_time,
args.max_frames,
args.out_dir,
quiet,
)
for i in video_range
)
else:
for i in video_range:
process_input(
inputs[i],
args.fps,
args.start_time,
args.max_frames,
args.out_dir,
quiet,
)
# upload to s3 if needed
if args.s3:
upload_files_to_s3(args.s3, args.out_dir)
# create the yaml file
if not args.no_list:
create_image_list(args.out_dir, url_root) | 4,617 |
def delete_job_queue(jobQueue=None):
"""
Deletes the specified job queue. You must first disable submissions for a queue with the UpdateJobQueue operation. All jobs in the queue are terminated when you delete a job queue.
It is not necessary to disassociate compute environments from a queue before submitting a DeleteJobQueue request.
See also: AWS API Documentation
Exceptions
Examples
This example deletes the GPGPU job queue.
Expected Output:
:example: response = client.delete_job_queue(
jobQueue='string'
)
:type jobQueue: string
:param jobQueue: [REQUIRED]\nThe short name or full Amazon Resource Name (ARN) of the queue to delete.\n
:rtype: dict
ReturnsResponse Syntax{}
Response Structure
(dict) --
Exceptions
Batch.Client.exceptions.ClientException
Batch.Client.exceptions.ServerException
Examples
This example deletes the GPGPU job queue.
response = client.delete_job_queue(
jobQueue='GPGPU',
)
print(response)
Expected Output:
{
'ResponseMetadata': {
'...': '...',
},
}
:return: {}
:returns:
Batch.Client.exceptions.ClientException
Batch.Client.exceptions.ServerException
"""
pass | 4,618 |
def tcpip(port=5555, debug=False):
"""
    Switch to tcpip (network) mode
    :param port: port (default: 5555)
    :param debug: debug switch (default: off)
    :return: nothing
"""
return adb_core.execute(f'tcpip {port}', debug=debug) | 4,619 |
def proclamadelcaucacom_story(soup):
"""
Function to pull the information we want from Proclamadelcauca.com stories
:param soup: BeautifulSoup object, ready to parse
"""
hold_dict = {}
#text
try:
article_body = soup.find('div', attrs={"class": "single-entradaContent"})
maintext = [para.text.strip() for para in article_body.find_all('p')]
hold_dict['maintext'] = '\n '.join(maintext).strip()
except:
article_body = None
return hold_dict | 4,620 |
def asarray_fft(x, inverse):
"""Recursive implementation of the 1D Cooley-Tukey FFT using np asarray
to prevent copying.
Parameters:
x (array): the discrete amplitudes to transform.
inverse (bool): perform the inverse fft if true.
Returns:
x (array): the amplitudes of the original signal.
OR
X (complex number array): the phase and amplitude of the transformation.
"""
coef = 1 if inverse else -1
N = x.shape[0]
# validating input array
if np.log2(N) % 1 > 0:
raise ValueError('array size must be a power of 2')
# 32 was arbitrarily chosen as "good enough"
elif N <= 32:
return dft(x, inverse)
# perform DFT on all N <= 32 sub-arrays
else:
even_terms = asarray_fft(x[::2], inverse)
odd_terms = asarray_fft(x[1::2], inverse)
exp_array = np.exp(coef * 2j * np.pi * np.arange(N) / N)
return np.concatenate([even_terms + exp_array[:(N >> 1)] * odd_terms,
even_terms + exp_array[(N >> 1):] * odd_terms]) | 4,621 |
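# Sanity-check sketch (assumption: dft() above implements the unnormalized DFT
# with the same sign convention): the forward transform should match NumPy's
# FFT for power-of-two lengths.
import numpy as np

x = np.random.rand(64).astype(complex)
X = asarray_fft(x, inverse=False)
assert np.allclose(X, np.fft.fft(x))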
def bootstrap(ctx):
"""Creates a systemd service file and to enable it."""
project_name = ctx.project.name
service_file = Path(ctx.paths.remote.configs) / f'{project_name}.service'
ctx.sudo(f'systemctl enable --now {service_file}') | 4,622 |
def get_long_description():
"""
Returns the long description of Wapyce.
:return: The long description of Wapyce.
:rtype: str
"""
with open(
os.path.join(BASE_DIRECTORY, 'README.md'),
'r',
encoding='utf-8'
) as readme_file:
return readme_file.read() | 4,623 |
def wrap_locale_context(func):
"""Wraps the func with the current locale."""
@jinja2.contextfilter
def _locale_filter(ctx, value, *args, **kwargs):
doc = ctx['doc']
if not kwargs.get('locale', None):
kwargs['locale'] = str(doc.locale)
return func(value, *args, **kwargs)
return _locale_filter | 4,624 |
def _create_node(module_path, tree_element):
"""Constructs a list of strings comprising the sections of the node module. Node module file is written.
:param module_path: full path to the library where the subnode lives
:param tree_element: Element object from the xml.etree.ElementTree package
:return: None
"""
module_text = _create_header()
module_text += _create_imports() + ['\n']
module_text += _create_class(tree_element) + ['\n']
module_text += _create_is_true() + ['\n']
module_text += _create_unittest(tree_element) + ['\n']
_write_node_file(module_path, tree_element.find('name').text + '.py', module_text) | 4,625 |
def mu_Xe(keV=12):
"""Returns inverse 1/e penetration depth [mm-1 atm-1] of Xe given the
x-ray energy in keV. The transmission through a 3-mm thick slab of Xe at
6.17 atm (76 psi) was calculated every 100 eV over an energy range
spanning 5-17 keV using:
http://henke.lbl.gov/optical_constants/filter2.html
This result was then converted to mu and saved as a tab-delimited text
file. The returned result is calculated using a univariate spline and
should be valid over the range 5-17 keV."""
from numpy import loadtxt
from scipy.interpolate import UnivariateSpline
E_mu = loadtxt('mu_Xe.txt',dtype=float,delimiter='\t')
us_mu = UnivariateSpline(E_mu[:,0],E_mu[:,1],s=0)
return us_mu(1000*keV) | 4,626 |
def file_md5(input_file):
"""
:param input_file: Path to input file.
:type input_file: str
:return: Returns the encoded data in the inputted file in hexadecimal format.
"""
with open(input_file, 'rb') as f:
data = f.read()
return hashlib.md5(data).hexdigest() | 4,627 |
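# Usage sketch (assumption: 'example_data.bin' is a placeholder path).
digest = file_md5("example_data.bin")
print(digest)  # e.g. the MD5 of an empty file is 'd41d8cd98f00b204e9800998ecf8427e'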
def signup(request):
"""Register a new user.
This view has multiple behaviours based on the request composition. When
some user is already signed in, the response ask the user to first sign
out. When the request has no data about the new user, then the response
carries the registration form. When the request has valid data about the
new user, the response informs him the registration success.
Args:
request (HttpRequest): the user request.
Returns:
HttpResponse: The response accordingly to the request composition.
"""
keys = request.POST.keys()
if request.user.is_authenticated():
return render(request, 'account/signup_already_signedin.html', None)
elif len(keys) == 0:
return render(request, 'account/signup.html', None)
else:
username = request.POST.get('username')
first_name = request.POST.get('first_name')
last_name = request.POST.get('last_name')
email = request.POST.get('email')
password = request.POST.get('password')
# confirm_password = request.POST.get('confirm_password')
# TODO: Check password and confirmation.
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_DEMAND)
ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, settings.LDAP_CACERTFILE)
l = ldap.initialize(secret.LDAP_URI)
l.protocol_version = ldap.VERSION3
l.start_tls_s()
l.simple_bind_s(secret.LDAP_BIND_DN, secret.LDAP_BIND_PASSWORD)
dn = settings.LDAP_USER_DN_TEMPLATE % str(username)
user = {
'cn': str(first_name),
'sn': str(last_name),
'mail': str(email),
'userPassword': str(utils.hashPassword(password)),
'objectClass': ['person', 'organizationalPerson', 'inetOrgPerson']
}
ldif = modlist.addModlist(user)
l.add_s(dn, ldif)
l.unbind_s()
context = {'username': username}
return render(request, 'account/signup_successful.html', context) | 4,628 |
def exit_func():
"""
    This function should be called when exiting: it first clears the screen, then exits.
    :return: exits the program after clearing the screen
"""
clear()
sys.exit(0) | 4,629 |
def assert_content_text_equal(act_text, exp_filepath):
"""
Compare an actual text file content and the content of an expected text
file for line-wise equality, tolerating differences in line endings
(LF vs. CRLF).
"""
with open(exp_filepath, 'r') as exp_fp:
exp_text = exp_fp.read()
act_text = act_text.replace('\r\n', '\n')
assert act_text == exp_text, \
"Unexpected text:\n" \
" actual: {!r}\n" \
" expected: {!r}\n". \
format(act_text, exp_text) | 4,630 |
def shuffle_rows(X):
"""
Randomly shuffles rows of a numpy matrix
"""
n, _ = X.shape
indices = list(range(n))
shuffle(indices)
X[:] = X[indices] | 4,631 |
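# Usage sketch: shuffle_rows permutes the rows of X in place.
import numpy as np

X = np.arange(12).reshape(4, 3)
shuffle_rows(X)
print(X)  # same rows as before, in a random order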
def message_args() -> Dict[str, str]:
"""A formatted message."""
return {"subject": "Test message", "message": "This is a test message"} | 4,632 |
def to_drive_type(value):
"""Convert value to DriveType enum."""
if isinstance(value, DriveType):
return value.value
sanitized = str(value).upper().strip().replace(" ", "_")
try:
return DriveType[sanitized].value
except KeyError as err:
raise ValueError(f"Unknown drive type: {value}") from err | 4,633 |
def upload_authorized_key(host, port, filepath):
"""UPLOAD (key) upload_authorized_key"""
params = {'method': 'upload_authorized_key'}
files = [('key', filepath, file_get_contents(filepath))]
return _check(https.client.https_post(host, port, '/', params, files=files)) | 4,634 |
def thermalize_cutoff(localEnergies, smoothing_window, tol):
"""Return position where system is thermalized
according to some tolerance tol, based on the derivative
of the smoothed local energies
"""
mean = np.mean(localEnergies)
smoothLocalEnergies = smoothing(localEnergies, smoothing_window)
check_slope = derivative(smoothLocalEnergies) < tol
cutoff = np.where(check_slope)[0][0]
return cutoff | 4,635 |
def annualize_metric(metric: float, holding_periods: int = 1) -> float:
"""
Annualize metric of arbitrary periodicity
:param metric: Metric to analyze
    :param holding_periods: Holding period in days over which the metric was realized
:return: Annualized metric
"""
days_per_year = 365
trans_ratio = days_per_year / holding_periods
return (1 + metric) ** trans_ratio - 1 | 4,636 |
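# Worked example: a 2% return over a 30-day holding period annualizes to
# (1 + 0.02) ** (365 / 30) - 1 ≈ 0.272, i.e. about 27.2%.
r_annual = annualize_metric(0.02, holding_periods=30)
print(f"{r_annual:.1%}")  # -> 27.2%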
def parse_args():
"""Command line arguments parser."""
app = argparse.ArgumentParser()
app.add_argument("in_chain", help="Input chain file or stdin")
app.add_argument("reference_2bit", help="Reference 2bit file")
app.add_argument("query_2bit", help="Query 2bit file")
app.add_argument("output", help="Output chain or stdout")
app.add_argument("-linearGap", choices=['loose', 'medium', 'filename'], help="loose|medium|filename")
app.add_argument("-scoreScheme", help="Read the scoring matrix from a blastz-format file")
if len(sys.argv) < 5:
app.print_help()
sys.exit(0)
args = app.parse_args()
return args | 4,637 |
def get_latest_tag():
"""
Find the value of the latest tag for the Adafruit CircuitPython library
bundle.
:return: The most recent tag value for the project.
"""
global LATEST_BUNDLE_VERSION # pylint: disable=global-statement
if LATEST_BUNDLE_VERSION == "":
LATEST_BUNDLE_VERSION = get_latest_release_from_url(
"https://github.com/adafruit/Adafruit_CircuitPython_Bundle/releases/latest"
)
return LATEST_BUNDLE_VERSION | 4,638 |
def detail(request, name):
"""
List all details about a single service.
"""
service = CRITsService.objects(name=name,
status__ne="unavailable").first()
if not service:
error = 'Service "%s" is unavailable. Please review error logs.' % name
return render_to_response('error.html', {'error': error},
RequestContext(request))
# TODO: fix code so we don't have to do this
service = service.to_dict()
service_class = crits.service_env.manager.get_service_class(name)
if user_is_admin(request.user):
clean = False
# Only show errors if the user is an admin.
error = _get_config_error(service)
else:
# Clean all non-public values for a non-admin
clean = True
error = None
# Replace the dictionary with a list (to preserve order the options
# were defined in the Service class), and remove data from any which
# are not editable)
service['config_list'] = service_class.format_config(service['config'],
clean=clean)
del service['config']
return render_to_response('services_detail.html',
{'service': service, 'config_error': error},
RequestContext(request)) | 4,639 |
def read_gold_conll2003(gold_file):
"""
Reads in the gold annotation from a file in CoNLL 2003 format.
Returns:
- gold: a String list containing one sequence tag per token.
E.g. [B-Kochschritt, L-Kochschritt, U-Zutat, O]
    - lines: a list of lists containing the original line split at "\t"
"""
gold = []
lines = []
with open(gold_file, encoding="utf-8") as f:
for line in f:
if line == "\n":
continue
line = line.strip().split("\t")
gold.append(line[3])
lines.append(line)
return gold, lines | 4,640 |
def clone_reindex(clone_ingest_config: CloneIngestConfig, **kwargs):
"""Pipeline for pulling down jsons from s3 and reindexing into elasticsearch"""
announce('Pulling down parsed snapshot files for reindexing ...')
clone_ingest_config.snapshot_manager.pull_current_snapshot_to_disk(
local_dir=clone_ingest_config.parsed_doc_base_dir,
snapshot_type='parsed',
using_db=False,
max_threads=clone_ingest_config.max_threads
)
if not next((p for p in clone_ingest_config.parsed_doc_base_dir.iterdir() if p.is_file()), None):
announce("[WARNING] No files were found for processing, exiting pipeline.")
exit(1)
announce('Reindexing in elasticsearch ...')
CloneIngestSteps.update_es(clone_ingest_config)
# not available on clones yet
# CloneIngestSteps.update_revocations(clone_ingest_config) | 4,641 |
def translate_nova_exception(method):
"""Transforms a cinder exception but keeps its traceback intact."""
@functools.wraps(method)
def wrapper(self, ctx, *args, **kwargs):
try:
res = method(self, ctx, *args, **kwargs)
except (nova_exceptions.ConnectionRefused,
keystone_exceptions.ConnectionError) as exc:
err_msg = encodeutils.exception_to_unicode(exc)
_reraise(exception.NovaConnectionFailed(reason=err_msg))
except (keystone_exceptions.BadRequest,
                nova_exceptions.BadRequest) as exc:
err_msg = encodeutils.exception_to_unicode(exc)
_reraise(exception.BadRequest(reason=err_msg))
except (keystone_exceptions.Forbidden,
nova_exceptions.Forbidden):
_reraise(exception.NotAuthorized())
except (keystone_exceptions.NotFound,
nova_exceptions.NotFound) as exc:
err_msg = encodeutils.exception_to_unicode(exc)
_reraise(exception.InstanceNotFound(reason=err_msg))
return res
return wrapper | 4,642 |
def _update_form(form):
""" """
if not form.text():
return form.setStyleSheet(error_css)
return form.setStyleSheet(success_css) | 4,643 |
def build_vocab_from_file(src_file, save_path, min_frequency=5, size=0, without_sequence_tokens=False):
"""
Generate word vocabularies from monolingual corpus.
:param src_file: Source text file.
:param save_path: Output vocabulary file.
:param min_frequency: Minimum word frequency. # for yelp and amazon, min_frequency=5
:param size: Maximum vocabulary size. If = 0, do not limit vocabulary.
:param without_sequence_tokens: If set, do not add special sequence tokens (start, end) in the vocabulary.
:return: No return.
"""
special_tokens = [constants.PADDING_TOKEN]
if not without_sequence_tokens:
special_tokens.append(constants.START_OF_SENTENCE_TOKEN)
special_tokens.append(constants.END_OF_SENTENCE_TOKEN)
vocab = {}
with open(src_file) as f:
for line in f:
words = line.split('\t')[0].split(' ')
for word in words:
if word in vocab:
vocab[word] += 1
else:
vocab[word] = 1
    filtered_list = filter(lambda kv: kv[1] > min_frequency, vocab.items())
sorted_list = sorted(filtered_list, key=lambda kv: (kv[1], kv[0]), reverse=True)
if size != 0:
sorted_list = sorted_list[:size]
with open(save_path, 'w') as f:
for s in special_tokens:
f.write(s)
f.write('\n')
for (k, v) in sorted_list:
f.write(k)
f.write('\n') | 4,644 |
def create_adapter(openvino, cpu_only, force_torch, use_myriad):
"""Create the best adapter based on constraints passed as CLI arguments."""
if use_myriad:
openvino = True
if cpu_only:
raise Exception("Cannot run with both cpu-only and Myriad options")
if force_torch and openvino:
raise Exception("Cannot run with both Torch and OpenVINO")
if not openvino:
if importlib.util.find_spec("torch") is None:
logger.info("Could not find Torch")
openvino = True
elif not cpu_only:
import torch
if torch.cuda.is_available():
logger.info("Detected GPU / CUDA support")
from torch_adapter import TorchAdapter
return TorchAdapter(False, DEFAULT_STYLE)
else:
logger.info("Failed to detect GPU / CUDA support")
if not force_torch:
if importlib.util.find_spec("openvino") is None:
logger.info("Could not find Openvino")
if openvino:
raise Exception("No suitable engine")
else:
if not cpu_only and not use_myriad:
from openvino.inference_engine import IEPlugin
try:
IEPlugin("GPU")
logger.info("Detected iGPU / clDNN support")
except RuntimeError:
logger.info("Failed to detect iGPU / clDNN support")
cpu_only = True
logger.info("Using OpenVINO")
logger.info("CPU Only: %s", cpu_only)
logger.info("Use Myriad: %s", use_myriad)
from openvino_adapter import OpenvinoAdapter
adapter = OpenvinoAdapter(cpu_only, DEFAULT_STYLE,
use_myriad=use_myriad)
return adapter
logger.info("Using Torch with CPU")
from torch_adapter import TorchAdapter
return TorchAdapter(True, DEFAULT_STYLE) | 4,645 |
def test_2():
"""
Binary and unary predicates with an overlapping variable subset.
:return:
"""
x, y = map(Variable, ['x', 'y'])
model = Model() # Instantiate a model.
enemy = model['enemy'] = Predicate(arity=2, name='enemy')
hostile = model['hostile'] = Predicate(name='hostile')
model['america-enemies'] = ForAll(x, Implies(enemy(x, (y, 'America')),
hostile(x),
name='enemy->hostile',
join=Join.OUTER),
name='america-enemies', join=Join.OUTER,
world=World.AXIOM)
# Add facts to model.
model.add_facts({'enemy': {('Nono', 'America'): Fact.TRUE}})
model.upward()
assert len(hostile.groundings) == 1, "FAILED 😔" | 4,646 |
def _make_ecg(inst, start, stop, reject_by_annotation=False, verbose=None):
"""Create ECG signal from cross channel average."""
if not any(c in inst for c in ['mag', 'grad']):
raise ValueError('Unable to generate artificial ECG channel')
for ch in ['mag', 'grad']:
if ch in inst:
break
logger.info('Reconstructing ECG signal from {}'
.format({'mag': 'Magnetometers',
'grad': 'Gradiometers'}[ch]))
picks = pick_types(inst.info, meg=ch, eeg=False, ref_meg=False)
if isinstance(inst, BaseRaw):
reject_by_annotation = 'omit' if reject_by_annotation else None
ecg, times = inst.get_data(picks, start, stop, reject_by_annotation,
True)
elif isinstance(inst, BaseEpochs):
ecg = np.hstack(inst.copy().crop(start, stop).get_data())
times = inst.times
elif isinstance(inst, Evoked):
ecg = inst.data
times = inst.times
return ecg.mean(0, keepdims=True), times | 4,647 |
def lorentzian(freq, freq0, area, hwhm, phase, offset, drift):
"""
Lorentzian line-shape function
Parameters
----------
freq : float or float array
The frequencies for which the function is evaluated
freq0 : float
The center frequency of the function
area : float
    hwhm : float
        Half-width at half-max
    phase : float
        Phase of the line (radians); mixes the absorptive and dispersive parts
    offset : float
        Constant baseline offset
    drift : float
        Linear baseline drift with frequency
"""
oo2pi = 1/(2*np.pi)
df = freq - freq0
absorptive = oo2pi * area * np.ones(freq.shape[0])*(hwhm / (df**2 + hwhm**2))
dispersive = oo2pi * area * df/(df**2 + hwhm**2)
return (absorptive * np.cos(phase) + dispersive * np.sin(phase) + offset +
drift * freq) | 4,648 |
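# Usage sketch: evaluate a purely absorptive line (phase=0, no baseline) and
# confirm the peak sits at freq0.
import numpy as np

freq = np.linspace(-50.0, 50.0, 1001)
spec = lorentzian(freq, freq0=0.0, area=1.0, hwhm=2.0,
                  phase=0.0, offset=0.0, drift=0.0)
print(freq[spec.argmax()])  # -> 0.0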
def findTilt(root):
"""
:type root: TreeNode
:rtype: int
"""
if root is None:
return 0
return findTilt_helper(root)[1] | 4,649 |
def generate_report():
"""
Get pylint analization report and write it to file
"""
files = get_files_to_check()
dir_path = create_report_dir()
file_path = create_report_file(dir_path)
config_opts = get_config_opts()
pylint_opts = '--load-plugins pylint_flask' + config_opts
pylint_stdout, pylint_stderr = epylint.py_run(files + ' ' + pylint_opts, return_std=True)
with open(file_path, 'w+') as report:
report.write(pylint_stdout.getvalue())
report.write(pylint_stderr.getvalue())
return True | 4,650 |
def read_files(year,month,day,station):
"""
"""
doy,cdoy,cyyyy,cyy = ymd2doy(year,month,day)
# i have a function for this ....
rinexfile = station + cdoy + '0.' + cyy + 'o'
navfilename = 'auto' + cdoy + '0.' + cyy + 'n'
if os.path.isfile(rinexfile):
print('rinexfile exists')
else:
print(rinexfile)
print('get the rinex file')
rinex_unavco(station, year, month, day)
# organize the file names
print('get the sp3 and clock file names')
sp3file, cname = igsname(year,month,day)
# define some names of files
if os.path.isfile(navfilename):
print('nav exists')
else:
print('get nav')
navname,navdir,foundit = getnavfile(year,month,day)
print('read in the broadcast ephemeris')
ephemdata = myreadnav(navfilename)
if os.path.isfile(cname):
print('file exists')
else:
print('get the CODE clock file')
codclock(year,month,day)
pname = cname[0:9] + 'pckl'
print('pickle', pname)
# if file exists already
if os.path.isfile(pname):
print('read existing pickle file')
f = open(pname, 'rb')
[prns,ts,clks] = pickle.load(f)
f.close()
else:
print('read and save as pickle')
prns, ts, clks = readPreciseClock(cname)
# and then save them
f = open(pname, 'wb')
pickle.dump([prns,ts,clks], f)
f.close()
if os.path.isfile(sp3file):
print('sp3 exsts')
else:
print('get sp3')
getsp3file(year,month,day)
print('read in the sp3 file', sp3file)
sweek, ssec, sprn, sx, sy, sz, sclock = read_sp3(sp3file)
    # print('len returned data', len(ephemdata), navfilename)
rinexpickle = rinexfile[0:11] + 'pclk'
if os.path.isfile(rinexpickle):
print('rinex pickle exists')
f=open(rinexpickle,'rb')
[obs,x,y,z]=pickle.load(f)
f.close()
else:
print('read the RINEX file ', rinexfile)
obs,x,y,z = myscan(rinexfile)
print('save as pickle file')
f=open(rinexpickle,'wb')
pickle.dump([obs,x,y,z], f)
f.close()
return ephemdata, prns, ts, clks, sweek, ssec, sprn, sx, sy,sz,sclock,obs,x,y,z | 4,651 |
def make_trampoline(func_name):
""" Create a main function that calls another function """
mod = ir.Module('main')
main = ir.Procedure('main')
mod.add_function(main)
entry = ir.Block('entry')
main.add_block(entry)
main.entry = entry
entry.add_instruction(ir.ProcedureCall(func_name, []))
entry.add_instruction(ir.Exit())
return mod | 4,652 |
def plot_in_flux_basic(
cur,
facility,
title):
"""Plots timeseries influx/ outflux from facility name in kg.
Parameters
----------
cur: sqlite cursor
sqlite cursor
facility: str
facility name
influx_bool: bool
if true, calculates influx,
if false, calculates outflux
title: str
title of the multi line plot
outputname: str
filename of the multi line plot file
is_cum: Boolean:
true: add isotope masses over time
false: do not add isotope masses at each timestep
Returns
-------
"""
masstime = mass_timeseries(cur, facility, flux='in')[0]
times = mass_timeseries(cur, facility, flux='in')[1]
nuclides = [item[0] for item in masstime]
masses = [item[1] for item in masstime]
mass_sort = sorted(masstime.items(), key=lambda e: e[
1][-1], reverse=True)
nuclides = [item[0] for item in mass_sort]
masses = [item[1] for item in mass_sort]
for i in range(len(times)):
plt.plot(times[i], masses[i], label=nuclides[i])
plt.legend(loc='upper left')
plt.title(title)
plt.xlabel('time [months]')
plt.ylabel('mass [kg]')
plt.xlim(left=0.0)
plt.ylim(bottom=0.0)
plt.show() | 4,653 |
def enrich(
db_path: Path = typer.Argument(
"./spotify.db", file_okay=True, dir_okay=False, writable=True
),
table: str = typer.Argument(
...,
),
uri_column: str = typer.Option(
"spotify_track_uri", help="Column name containing tracks' URIs"
),
new_name: str = typer.Option(
"", help="Name for new table containing audio features from tracks in `table`"
),
):
"""Get audio features for tracks in `table_name`"""
db = Database(db_path)
uris = db.execute(
f"select distinct({uri_column}) from {table} where {uri_column} is not null;"
).fetchall()
if new_name:
table_name = new_name
else:
table_name = f"enriched_{table}"
get_audio_features_from_uri(uris, table_name, db) | 4,654 |
def run_and_retry(json_file: str = "latest.json", charts_file: str = "charts.svg", max_attempts: int = 10, sleep_seconds_on_error: float = 10) -> None:
"""
Calculates the current CBBI confidence value alongside all the required metrics.
Everything gets pretty printed to the current standard output and a clean copy
is saved to a JSON file specified by the path in the ``json_file`` argument.
A charts image is generated on the path specified by the ``charts_file`` argument
which summarizes all individual metrics' historical data in a visual way.
The execution is attempted multiple times in case an error occurs.
Args:
json_file: File path where the output is saved in the JSON format.
charts_file: File path where the charts image is saved (formats supported by pyplot.savefig).
max_attempts: Maximum number of attempts before termination. An attempt is counted when an error occurs.
sleep_seconds_on_error: Duration of the sleep in seconds before attempting again after an error occurs.
Returns:
None
"""
assert max_attempts > 0, 'Value of the max_attempts argument must be at least 1'
assert sleep_seconds_on_error >= 0, 'Value of the sleep_seconds_on_error argument must be positive'
for _ in range(max_attempts):
try:
run(json_file, charts_file)
exit(0)
except Exception:
cli_ui.error('An error occurred!')
traceback.print_exc()
print()
cli_ui.info_1(f'Retrying in {sleep_seconds_on_error} seconds...')
time.sleep(sleep_seconds_on_error)
cli_ui.info_1(f'Max attempts limit has been reached ({max_attempts}). Better luck next time!')
exit(-1) | 4,655 |
def main(global_config, **settings):
"""Return a Pyramid WSGI application."""
if not settings.get('sqlalchemy.url'):
try:
settings['sqlalchemy.url'] = os.environ['BLOG2017_DB']
except KeyError:
print('Required BLOG2017_DB not set in global os environ.')
sys.exit()
authentication_policy = AuthTktAuthenticationPolicy(os.environ.get('AUTH_STRING'))
authorization_policy = ACLAuthorizationPolicy()
config = Configurator(settings=settings,
authentication_policy=authentication_policy,
authorization_policy=authorization_policy)
config.include('pyramid_jinja2')
config.include('.models')
config.include('.routes')
config.scan()
return config.make_wsgi_app() | 4,656 |
def model_fields(dbo, baseuri=None):
"""Extract known fields from a BQ object, while removing any known
from C{excluded_fields}
@rtype: dict
@return fields to be rendered in XML
"""
attrs = {}
try:
dbo_fields = dbo.xmlfields
except AttributeError:
# This occurs when the object is a fake DB objects
# The dictionary is sufficient
dbo_fields= dbo.__dict__
for fn in dbo_fields:
fn = mapping_fields.get(fn, fn)
# Skip when map is None
if fn is None:
continue
# Map is callable, then call
if callable(fn):
fn, attr_val = fn(dbo, fn, baseuri)
else:
attr_val = getattr(dbo, fn, None)
# Put value in attribute dictionary
if attr_val is not None and attr_val!='':
if isinstance(attr_val, basestring):
attrs[fn] = attr_val
else:
attrs[fn] = str(attr_val) #unicode(attr_val,'utf-8')
return attrs | 4,657 |
def test_fetch_incidents(mocker):
"""Unit test
Given
- fetch incidents command
- command args
- command raw response
When
- mock the Client's token generation.
- mock the Client's get_alerts_request.
Then
- run the fetch incidents command using the Client
Validate The length of the results and the last_run.
"""
mocker.patch.object(FireEyeClient, '_get_token', return_value='token')
client = Client(base_url="https://fireeye.cm.com/", username='user', password='pass', verify=False, proxy=False)
mocker.patch.object(FireEyeClient, 'get_alerts_request', return_value=util_load_json('test_data/alerts.json'))
last_run, incidents = fetch_incidents(client=client,
last_run={},
first_fetch='1 year',
max_fetch=50,
info_level='concise')
assert len(incidents) == 11
assert last_run.get('time') == '2021-05-18 12:02:54 +0000' | 4,658 |
def get_bsj(seq, bsj):
"""Return transformed sequence of given BSJ"""
return seq[bsj:] + seq[:bsj] | 4,659 |
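# Usage sketch: rotate a circular sequence so it starts at the back-splice junction.
print(get_bsj("ACGTACGTGG", 3))  # -> "TACGTGGACG"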
def update_celery_task_status_socketio(task_id):
"""
This function would be called in Celery worker
https://python-socketio.readthedocs.io/en/latest/server.html#emitting-from-external-processes
"""
# connect to the redis queue as an external process
external_sio = socketio.RedisManager(
settings.WS_MESSAGE_QUEUE, write_only=True
) # emit on event
external_sio.emit(
"status", get_task_info(task_id), room=task_id, namespace="/task_status"
) | 4,660 |
def test_perform():
"""Test the `/perform` path."""
response = app.test_client().post(
"/perform/simple",
data=json.dumps({"colour": [0, 255, 0]}),
content_type="application/json",
)
assert response.status_code == 200
assert response.json == {"status": "OK"} | 4,661 |
def test_colormap():
"""Test colormap support for non-uniformly distributed control-points"""
with TestingCanvas(size=size, bgcolor='w') as c:
idata = np.linspace(255, 0, size[0]*size[1]).astype(np.ubyte)
data = idata.reshape((size[0], size[1]))
image = Image(cmap=Colormap(colors=['k', 'w', 'r'],
controls=[0.0, 0.1, 1.0]),
clim='auto', parent=c.scene)
image.set_data(data)
assert_image_approved(c.render(), "visuals/colormap_kwr.png") | 4,662 |
def sample(problem: Dict, N: int, calc_second_order: bool = True,
skip_values: int = 0):
"""Generates model inputs using Saltelli's extension of the Sobol' sequence.
Returns a NumPy matrix containing the model inputs using Saltelli's sampling
scheme. Saltelli's scheme extends the Sobol' sequence in a way to reduce
the error rates in the resulting sensitivity index calculations. If
`calc_second_order` is False, the resulting matrix has ``N * (D + 2)``
rows, where ``D`` is the number of parameters. If `calc_second_order` is True,
the resulting matrix has ``N * (2D + 2)`` rows. These model inputs are
intended to be used with :func:`SALib.analyze.sobol.analyze`.
If `skip_values` is > 0, raises a UserWarning in cases where sample sizes may
be sub-optimal. The convergence properties of the Sobol' sequence require
``N < skip_values`` and that both `N` and `skip_values` are powers of 2
(e.g., ``N = 2^n``). See the discussion in [4] for context and information.
If skipping values, one recommendation is to skip the largest possible ``(2^n)-1``
points such that ``(2^n)-1 <= N`` (see [5]).
Parameters
----------
problem : dict
The problem definition
N : int
The number of samples to generate.
Must be a power of 2 and, when skipping points, less than `skip_values`.
calc_second_order : bool
Calculate second-order sensitivities (default True)
skip_values : int
Number of points in the Sobol' sequence to skip, ideally a power of 2
(default 0, see Owen [3] and the discussion in [4])
References
----------
.. [1] Saltelli, A., 2002.
Making best use of model evaluations to compute sensitivity indices.
Computer Physics Communications 145, 280–297.
https://doi.org/10.1016/S0010-4655(02)00280-1
.. [2] Sobol', I.M., 2001.
Global sensitivity indices for nonlinear mathematical models and
their Monte Carlo estimates.
Mathematics and Computers in Simulation,
The Second IMACS Seminar on Monte Carlo Methods 55, 271–280.
https://doi.org/10.1016/S0378-4754(00)00270-6
.. [3] Owen, A. B., 2020.
On dropping the first Sobol' point.
arXiv:2008.08051 [cs, math, stat].
Available at: http://arxiv.org/abs/2008.08051 (Accessed: 20 April 2021).
.. [4] Discussion: https://github.com/scipy/scipy/pull/10844
https://github.com/scipy/scipy/pull/10844#issuecomment-673029539
.. [5] Johnson, S. G.
Sobol.jl: The Sobol module for Julia
https://github.com/stevengj/Sobol.jl
"""
# bit-shift test to check if `N` == 2**n
if not ((N & (N-1) == 0) and (N != 0 and N-1 != 0)):
msg = f"""
Convergence properties of the Sobol' sequence are only valid if
`N` ({N}) is a power of 2 (`2^n`).
"""
warnings.warn(msg)
if skip_values > 0:
M = skip_values
if not ((M & (M-1) == 0) and (M != 0 and M-1 != 0)):
msg = f"""
Convergence properties of the Sobol' sequence are only valid if
`skip_values` ({M}) is a power of 2 (`2^m`).
"""
warnings.warn(msg)
n_exp = int(math.log(N, 2))
m_exp = int(math.log(M, 2))
if n_exp >= m_exp:
msg = f"Convergence may not be valid as 2^{n_exp} ({N}) is >= 2^{m_exp} ({M})."
warnings.warn(msg)
D = problem['num_vars']
groups = _check_groups(problem)
if not groups:
Dg = problem['num_vars']
else:
G, group_names = compute_groups_matrix(groups)
Dg = len(set(group_names))
# Create base sequence - could be any type of sampling
base_sequence = sobol_sequence.sample(N + skip_values, 2 * D)
if calc_second_order:
saltelli_sequence = np.zeros([(2 * Dg + 2) * N, D])
else:
saltelli_sequence = np.zeros([(Dg + 2) * N, D])
index = 0
for i in range(skip_values, N + skip_values):
# Copy matrix "A"
for j in range(D):
saltelli_sequence[index, j] = base_sequence[i, j]
index += 1
# Cross-sample elements of "B" into "A"
for k in range(Dg):
for j in range(D):
if (not groups and j == k) or (groups and group_names[k] == groups[j]):
saltelli_sequence[index, j] = base_sequence[i, j + D]
else:
saltelli_sequence[index, j] = base_sequence[i, j]
index += 1
# Cross-sample elements of "A" into "B"
# Only needed if you're doing second-order indices (true by default)
if calc_second_order:
for k in range(Dg):
for j in range(D):
if (not groups and j == k) or (groups and group_names[k] == groups[j]):
saltelli_sequence[index, j] = base_sequence[i, j]
else:
saltelli_sequence[index, j] = base_sequence[i, j + D]
index += 1
# Copy matrix "B"
for j in range(D):
saltelli_sequence[index, j] = base_sequence[i, j + D]
index += 1
saltelli_sequence = scale_samples(saltelli_sequence, problem)
return saltelli_sequence | 4,663 |
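A minimal usage sketch, assuming the usual SALib-style problem dictionary with 'num_vars', 'names' and 'bounds' keys; the names and bounds below are illustrative:
problem = {
    'num_vars': 3,
    'names': ['x1', 'x2', 'x3'],
    'bounds': [[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]],
}
# N must be a power of 2; with calc_second_order=True this yields N * (2D + 2) rows
param_values = sample(problem, N=128, calc_second_order=True)
print(param_values.shape)  # (1024, 3)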
def standardize(table, option):
"""
standardize
Z = (X - mean) / (standard deviation)
"""
if option == 'table':
mean = np.mean(table)
std = np.std(table)
t = []
for row in table:
t_row = []
if option != 'table':
mean = np.mean(row)
std = np.std(row)
for i in row:
if std == 0:
t_row.append(0)
else:
t_row.append((i - mean)/std)
t.append(t_row)
return t | 4,664 |
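A small usage sketch showing the two option modes (the option string is only compared against 'table'):
table = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]
z_table = standardize(table, 'table')  # one mean/std computed over all six values
z_rows = standardize(table, 'rows')    # any other option standardizes each row separately
print(z_rows[0])  # approximately [-1.2247, 0.0, 1.2247]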
def cache(project: Project, patterns: Sequence[str], clear: bool):
"""Inspect or clear the cache."""
if clear:
with message_fence("Clearing cache..."):
if cache_names := ", ".join(project.clear_cache(patterns)):
click.echo(f"Cache cleared successfully: {cache_names}.\n")
else:
click.echo(
"No matching results.\n"
if patterns
else "The cache is already cleared.\n"
)
else:
with message_fence("Inspecting cache..."):
click.echo(
"\n".join(project.inspect_cache(patterns))
or (
"No matching results.\n"
if patterns
else "The cache is completely clear.\n"
)
) | 4,665 |
def raw_input_nonblock():
"""
return result of raw_input if has keyboard input, otherwise return None
"""
if _IS_OS_WIN32:
return _raw_input_nonblock_win32()
else:
raise NotImplementedError('Unsupported os.') | 4,666 |
def get_batch_hard(draw_batch_size,hard_batchs_size,semihard_batchs_size,easy_batchs_size,norm_batchs_size,network,dataset,nb_classes, margin):
"""
Create batch of APN "hard" triplets
Arguments:
draw_batch_size -- integer : number of initial randomly taken samples
hard_batchs_size -- interger : select the number of hardest samples to keep
norm_batchs_size -- interger : number of random samples to add
Returns:
triplets -- list containing 3 tensors A,P,N of shape (hard_batchs_size+norm_batchs_size,w,h,c)
"""
X = dataset
m, w, h = X[0].shape # c removed
#Step 1 : pick a random batch to study
studybatch = get_batch_random(draw_batch_size,dataset, nb_classes)
#Step 2 : compute the loss with the current network : d(A,P)-d(A,N). The alpha parameter is omitted here since we only want to order the samples
studybatchloss = np.zeros((draw_batch_size))
#Compute embeddings for anchors, positive and negatives
#print('*',studybatch[0][:,:,:].shape)
A = network.predict(studybatch[0][:,:,:])
P = network.predict(studybatch[1][:,:,:])
N = network.predict(studybatch[2][:,:,:])
#Compute d(A,P)-d(A,N) # HARD
studybatchloss = np.sqrt(np.sum(np.square(A-P),axis=1)) - np.sqrt(np.sum(np.square(A-N),axis=1))
#Sort by distance (high distance first) and take the hardest
selection = np.argsort(studybatchloss)[::-1][:hard_batchs_size]
#Compute d(A,N)-d(A,P) # EASY
studybatchloss = -np.sqrt(np.sum(np.square(A-P),axis=1)) + np.sqrt(np.sum(np.square(A-N),axis=1))
#Sort by distance (high distance first) and take the EASIEST
selection1 = np.argsort(studybatchloss)[::-1][:easy_batchs_size] #
#Compute d(A,N)-d(A,P) SEMI-HARD
semihard_index1 = np.squeeze(np.where(np.sqrt(np.sum(np.square(A-P),axis=1)) + margin > np.sqrt(np.sum(np.square(A-N),axis=1))))
semihard_index2 = np.squeeze(np.where(np.sqrt(np.sum(np.square(A-P),axis=1)) < np.sqrt(np.sum(np.square(A-N),axis=1))))
semihard_index = np.intersect1d(semihard_index1,semihard_index2)
selection2 = semihard_index[:semihard_batchs_size] #
selection = np.append(selection,selection1) #Hard & Easy
selection = np.append(selection,selection2) #Hard & Easy & SemiHard
#Draw other random samples from the batch
selection2 = np.random.choice(np.delete(np.arange(draw_batch_size),selection),norm_batchs_size,replace=False)
selection = np.append(selection,selection2) #Hard & Easy & SemiHard & Random
triplets = [studybatch[0][selection,:,:], studybatch[1][selection,:,:], studybatch[2][selection,:,:]]
return triplets | 4,667 |
def sine(
start, end, freq, amp: Numeric = 1, n_periods: Numeric = 1
) -> TimeSerie:
"""
Generate a sine TimeSerie.
"""
index = pd.date_range(start=start, end=end, freq=freq)
return TimeSerie(
index=index,
y_values=np.sin(
np.linspace(0, 2 * math.pi * n_periods, num=len(index))
)
* amp,
) | 4,668 |
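A hypothetical call, assuming `TimeSerie` accepts the index/y_values pair built above (the class comes from the surrounding project):
# two full sine periods over a year of daily timestamps, amplitude 3
ts = sine(start="2021-01-01", end="2021-12-31", freq="D", amp=3, n_periods=2)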
def get_from_address(sending_profile, template_from_address):
"""Get campaign from address."""
# Get template display name
if "<" in template_from_address:
template_display = template_from_address.split("<")[0].strip()
else:
template_display = None
# Get template sender
template_sender = template_from_address.split("@")[0].split("<")[-1]
# Get sending profile domain
if type(sending_profile) is dict:
sp_from = sending_profile["from_address"]
else:
sp_from = sending_profile.from_address
sp_domain = sp_from.split("<")[-1].split("@")[1].replace(">", "")
# Generate from address
if template_display:
from_address = f"{template_display} <{template_sender}@{sp_domain}>"
else:
from_address = f"{template_sender}@{sp_domain}"
return from_address | 4,669 |
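A worked example tracing the string handling above; the addresses are made up and the sending profile is passed as a plain dict:
sending_profile = {"from_address": "Phishing Team <noreply@example-sp.com>"}
template_from = "IT Support <helpdesk@template-domain.org>"
# display name and local part come from the template, the domain from the sending profile
print(get_from_address(sending_profile, template_from))
# -> IT Support <helpdesk@example-sp.com>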
def is_dwm_compositing_enabled():
"""Is Desktop Window Manager compositing (Aero) enabled.
"""
import ctypes
enabled = ctypes.c_bool()
try:
DwmIsCompositionEnabled = ctypes.windll.dwmapi.DwmIsCompositionEnabled
except (AttributeError, WindowsError):
# dwmapi or DwmIsCompositionEnabled is not present
return False
rval = DwmIsCompositionEnabled(ctypes.byref(enabled))
return rval == 0 and enabled.value | 4,670 |
def save_any_to_npy(save_dict={}, name='file.npy'):
"""Save variables to .npy file.
Examples
---------
>>> tl.files.save_any_to_npy(save_dict={'data': ['a','b']}, name='test.npy')
>>> data = tl.files.load_npy_to_any(name='test.npy')
>>> print(data)
... {'data': ['a','b']}
"""
np.save(name, save_dict) | 4,671 |
async def kickme(leave):
""" .kickme komutu gruptan çıkmaya yarar """
chat = await leave.get_chat()
await leave.edit(f"{PLUGIN_MESAJLAR['kickme']}".format(
id=chat.id,
title=chat.title,
member_count="Bilinmiyor" if chat.participants_count == None else (chat.participants_count - 1)
))
await leave.client.kick_participant(leave.chat_id, 'me') | 4,672 |
def logging_setup(config: Dict):
"""
setup logging based on the configuration
:param config: the parsed config tree
"""
log_conf = config['logging']
fmt = log_conf['format']
if log_conf['enabled']:
level = logging._nameToLevel[log_conf['level'].upper()]
else:
level = logging.NOTSET
logging.basicConfig(format=fmt, level=logging.WARNING)
logger.setLevel(level) | 4,673 |
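A sketch of the configuration shape the function reads; the key names come from the lookups above, the values are illustrative, and `logger` is assumed to be the module-level logger used in the last line:
config = {
    'logging': {
        'enabled': True,
        'level': 'debug',  # resolved through logging._nameToLevel after upper-casing
        'format': '%(asctime)s %(name)s %(levelname)s %(message)s',
    }
}
logging_setup(config)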
def hamming_distance(seq_1, seq_2, ignore_case):
"""Hamming distance between two sequences
Calculate the Hamming distance between SEQ_1 and SEQ_2.
"""
result = pb.hamming_distance(seq_1, seq_2, ignore_case=ignore_case)
click.echo(result) | 4,674 |
def test_apply_many_flows(backend):
"""
Expect multiple flows to operate independently and produce the correct final flow rate.
"""
model = CompartmentalModel(
times=[0, 5], compartments=["S", "I", "R"], infectious_compartments=["I"]
)
model._set_backend(backend)
model.set_initial_population(distribution={"S": 900, "I": 100})
model.add_death_flow("infection_death", 0.1, "I")
model.add_universal_death_flows("universal_death", 0.1)
model.add_infection_frequency_flow("infection", 0.2, "S", "I")
model.add_transition_flow("recovery", 0.1, "I", "R")
model.add_transition_flow("vaccination", 0.1, "S", "R")
model.add_crude_birth_flow("births", 0.1, "S")
model._backend.prepare_to_run()
actual_flow_rates = model._backend.get_compartment_rates(model.initial_population, 0)
# Expect the effects of all these flows to be linearly superimposed.
infect_death_flows = np.array([0, -10, 0])
universal_death_flows = np.array([-90, -10, 0])
infected = 900 * 0.2 * (100 / 1000)
infection_flows = np.array([-infected, infected, 0])
recovery_flows = np.array([0, -10, 10])
vaccination_flows = np.array([-90, 0, 90])
birth_flows = np.array([100, 0, 0])
expected_flow_rates = (
infect_death_flows
+ universal_death_flows
+ infection_flows
+ recovery_flows
+ vaccination_flows
+ birth_flows
)
assert_array_equal(actual_flow_rates, expected_flow_rates) | 4,675 |
def postreleaser_before(data):
"""
postreleaser.before hook to set a different dev_version_template from the
default: By default zest.releaser uses <version>.dev0. We want just
<version>.dev without the mysterious 0.
"""
if data['name'] != 'astropy':
return
data['dev_version_template'] = '%(new_version)s.dev' | 4,676 |
def fgsm(x, y_true, y_hat, epsilon=0.075):
"""Calculates the fast gradient sign method adversarial attack
Following the FGSM algorithm, determines the gradient of the cost function
wrt the input, then perturbs all the input in the direction that will cause
the greatest error, with small magnitude.
"""
loss = tf.nn.softmax_cross_entropy_with_logits(
labels=y_true, logits=y_hat)
grad, = tf.gradients(loss, x)
scaled_grad = epsilon * tf.sign(grad)
return tf.stop_gradient(x + scaled_grad) | 4,677 |
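The perturbation implemented above is x_adv = x + epsilon * sign(grad_x L(x, y_true)). A graph-mode (TF1-style) usage sketch, with `build_logits` standing in for whatever network produces the logits:
x = tf.placeholder(tf.float32, [None, 784])
y_true = tf.placeholder(tf.float32, [None, 10])
y_hat = build_logits(x)  # hypothetical model returning unscaled logits
x_adv = fgsm(x, y_true, y_hat, epsilon=0.075)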
def clear_discovery_hash(hass, discovery_hash):
"""Clear entry in ALREADY_DISCOVERED list."""
if ALREADY_DISCOVERED not in hass.data:
# Discovery is shutting down
return
del hass.data[ALREADY_DISCOVERED][discovery_hash] | 4,678 |
def tour_delete(request,id):
""" delete tour depending on id """
success_message, error_message = None, None
form = TourForm()
tour = get_object_or_404(Tour, id=id)
tours = Tour.objects.all()
if request.method=="POST":
tour.delete()
success_message = "deleted tour"
else:
error_message = "to delete tour"
context = {
'form': form,
'tours': tours,
'success_message': success_message,
'error_message': error_message,
'user_info': Employee.objects.get(employee_id=request.user.username),
'cart': Cart.objects.filter(created_by__employee_id=request.user.username).count,
}
return render(request, 'employee/tour_add.html', context) | 4,679 |
def d3():
"""Simulate the roll of a 3 sided die"""
return random.randint(1, 3) | 4,680 |
def split_file_name(file_name):
"""
Return the file name with its extension removed.
:param file_name: file name (or path), possibly including an extension
:return: the file name without its extension
"""
return os.path.splitext(file_name)[0] | 4,681 |
def run_as_root(command, printable=True, silent_start=False):
"""
General purpose wrapper for running a subprocess as root user
"""
sudo_command = "sudo {}".format(command)
return run_command(sudo_command,
error_msg="",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=None,
printable=printable,
silent_start=silent_start) | 4,682 |
def test_one_time_bonus_value():
"""Check that the value over date ranges is correct
Over 5 years the bonus is received exactly twice: only two weeks contain a payoff date"""
base_date = datetime.date(2018, 1, 1)
payoff_date = datetime.date(2018, 6, 6)
bonus_amount = 100000
bonus = PeriodicBonus(
amount=bonus_amount,
date_seq=[payoff_date, payoff_date + datetime.timedelta(weeks=52)],
)
num_diff_zero = 0
acc = 0
for d in range(52 * 5):
start = base_date + datetime.timedelta(weeks=d)
end = start + datetime.timedelta(days=6)
v = bonus.value(start, end)
if v != 0:
num_diff_zero += 1
acc += v
assert acc == bonus_amount * 2
assert num_diff_zero == 2 | 4,683 |
def generate_patches(in_file, out_file, mode, remove, train_fraction, seed, epsilon, min_points):
"""
Sample the data from the source file and divide them into training and test data through a selection matrix
Result is stored in the given output HDF5 file.
:param in_file: str containing the filename of the basic .hdf5 file
:param out_file: str containing the path where the hdf5-file will be saved
:param mode: str, deciding how to split training and test data, can be 'random', 'size' or 'stddev'
:param remove: list(int), list of class labels to remove from the ground-truth (e.g. background)
:param train_fraction: positive float determining the fraction of the data used for training
:param seed: positive, int random seed for reproducibility of the random sampling
:param epsilon: positive, float, cluster sampling search radius
:param min_points: positive, int, cluster sampling density threshold
"""
# read data
start = time.time()
print('\t\tReading data source... ', end='', flush=True)
data, labels = read_datasets(in_file, DATA, LABELS)
print('\t\t\t[Done, took: {:.2f}s]'.format(time.time() - start))
# create train/test selection mask
start = time.time()
print('\t\tCreating selection mask... ', end='', flush=True)
if mode == RANDOM:
selection = create_random_selection(labels, remove, train_fraction, seed)
else:
selection = create_cluster_selection(mode, data, labels, remove, train_fraction, epsilon, min_points)
print('\t\t[Done, took: {:.2f}s]'.format(time.time() - start))
# determine normalization coefficients
start = time.time()
print('\t\tCalculate normalization coefficients...', end='', flush=True)
mean = data[selection == TRAIN].reshape(-1, data.shape[-1]).mean(axis=0).reshape(1, 1, -1)
stddev = data[selection == TRAIN].reshape(-1, data.shape[-1]).std(axis=0).reshape(1, 1, -1)
print('\t[Done, took: {:.2f}s]'.format(time.time() - start))
# store the selected data
start = time.time()
print('\t\tSaving data file ... ', end='', flush=True)
safe_patches(out_file, mode, remove, train_fraction, seed, epsilon, min_points, selection, mean, stddev)
print('\t\t\t[Done, took: {:.2f}s]'.format(time.time() - start)) | 4,684 |
def _search_qr(model, identifier, session):
"""Search the database using a Query/Retrieve *Identifier* query.
Parameters
----------
model : pydicom.uid.UID
Either *Patient Root Query Retrieve Information Model* or *Study Root
Query Retrieve Information Model* for C-FIND, C-GET or C-MOVE.
identifier : pydicom.dataset.Dataset
The request's *Identifier* dataset.
session : sqlalchemy.orm.session.Session
The session we are using to query the database.
Returns
-------
list of db.Instance
The Instances that match the query.
"""
# Will raise InvalidIdentifier if check failed
_check_identifier(identifier, model)
if model in _PATIENT_ROOT:
attr = _PATIENT_ROOT[model]
else:
attr = _STUDY_ROOT[model]
# Hierarchical search method: C.4.1.3.1.1
query = None
for level, keywords in attr.items():
# Keywords at current level that are in the identifier
keywords = [kw for kw in keywords if kw in identifier]
# Create query dataset for only the current level and run it
ds = Dataset()
for kw in keywords:
    setattr(ds, kw, getattr(identifier, kw))
query = build_query(ds, session, query)
if level == identifier.QueryRetrieveLevel:
break
return query.all() | 4,685 |
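A sketch of building a Patient Root C-FIND identifier for this function; the SQLAlchemy `session` is assumed to be opened elsewhere in the module:
from pydicom.dataset import Dataset
from pydicom.uid import UID

identifier = Dataset()
identifier.QueryRetrieveLevel = 'STUDY'
identifier.PatientID = '12345'
identifier.StudyInstanceUID = ''  # universal matching at the STUDY level
model = UID('1.2.840.10008.5.1.4.1.2.1.1')  # Patient Root Q/R Information Model - FIND
matches = _search_qr(model, identifier, session)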
def produce_summary_pdf(model_name, img_path, hyperparams, model_arch, train_stats):
"""
Produce a summary pdf containing configuration used, training curve,
model architecture and epoch training summary
:param model_name: name of current experiment/model being run
:param img_path: path to the saved loss-curve image embedded in the report
:param hyperparams: dict of all the model configuration
:param model_arch: nn.Module object to print
:param train_stats: dictionary containing training/test loss and accuracy as well as total duration
"""
# datetime object containing current date and time
now = datetime.now()
dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
pdf = FPDF()
pdf.set_title("training_summary_{}_{}".format(model_name.lower(), dt_string))
pdf.add_page()
pdf.set_xy(0, 10)
pdf.set_font("Helvetica", "BI", 16)
pdf.set_text_color(25, 33, 78)
pdf.set_draw_color(25, 33, 78)
pdf.cell(20)
pdf.cell(
200,
10,
"Model Training Summary: {}".format(model_name.upper()),
0,
2,
)
pdf.set_font("Helvetica", "B", 12)
pdf.cell(
200,
5,
dt_string,
0,
2,
)
# Model Configuration Section
pdf.cell(150, 10, "Model Configuration:", 0, 2)
pdf.cell(30, 10, "Parameter", 1, 0)
pdf.cell(140, 10, "Value", 1, 2)
pdf.set_text_color(255, 96, 80)
pdf.set_font("Helvetica", "", 12)
pdf.cell(-30)
attributes = [
"model_dir",
"log_dir",
"check_dir",
"current_epoch",
"overwrite",
"exp_name",
]
for i, val in enumerate(hyperparams):
if val not in attributes:
pdf.cell(30, 10, "%s" % (val), 1, 0)
pdf.cell(140, 10, "%s" % (hyperparams[val]), 1, 2)
pdf.cell(-30)
pdf.cell(90, 3, "", 0, 2)
# Model Performance Section
pdf.set_text_color(25, 33, 78)
pdf.set_font("Helvetica", "B", 12)
pdf.cell(150, 10, "Model Performance Stats:", 0, 2)
pdf.set_font("Helvetica", "", 12)
loss = train_stats["test_loss"]
acc = train_stats["test_acc"]
pdf.set_text_color(255, 96, 80)
pdf.cell(35, 6, "Best Loss:", 0, 0)
pdf.cell(
45, 6, "{:.3f} (Epoch {})".format(min(loss), loss.index(min(loss)) + 1), 0, 0
)
pdf.cell(60, 6, "Training Duration:", 0, 0)
pdf.cell(30, 6, "{:.3f} (s)".format(train_stats["total_dur"]), 0, 2)
pdf.cell(-140)
pdf.cell(35, 6, f"Best Accuracy:", 0, 0)
pdf.cell(45, 6, "{:.3f} (Epoch {})".format(max(acc), acc.index(max(acc)) + 1), 0, 0)
pdf.cell(60, 6, "Average Epoch Duration:", 0, 0)
pdf.cell(
30,
6,
"{:.3f} (s)".format(train_stats["total_dur"] / hyperparams["current_epoch"]),
0,
2,
)
pdf.cell(-140)
pdf.cell(90, 3, "", 0, 2)
# Loss Curve Section
pdf.set_text_color(25, 33, 78)
pdf.set_font("Helvetica", "B", 12)
pdf.cell(150, 10, "Model Loss Curve:", 0, 2)
pdf.image(img_path, x=None, y=None, w=160, h=0, type="PNG", link="")
# Second Page of Report
pdf.add_page()
pdf.set_xy(0, 0)
pdf.cell(20, 20)
# Model Arch Section
pdf.cell(150, 20, "Model Configuration:", 0, 2)
pdf.set_font("Helvetica", "", 12)
if model_arch is None:
model_arch = "No model configuration was provided"
pdf.set_text_color(255, 96, 80)
pdf.multi_cell(180, 8, str(model_arch))
# Third Page of Report
pdf.add_page()
pdf.set_xy(0, 0)
pdf.cell(20, 20, " ")
# Training Loss Section
pdf.set_text_color(25, 33, 78)
pdf.set_font("Helvetica", "B", 12)
pdf.cell(150, 20, "Detailed Loss Output:", 0, 2)
pdf.cell(40, 8, "Epoch", 1, 0, "C")
pdf.cell(30, 8, "Train Loss", 1, 0, "C")
pdf.cell(30, 8, "Test Loss", 1, 0, "C")
pdf.cell(30, 8, "Train Acc", 1, 0, "C")
pdf.cell(30, 8, "Test Acc", 1, 2, "C")
pdf.set_text_color(255, 96, 80)
pdf.set_font("Helvetica", "", 12)
pdf.cell(-130)
for i in range(0, len(train_stats["train_loss"])):
pdf.cell(40, 8, "{}".format((i + 1)), 1, 0, "C")
pdf.cell(30, 8, "{:.3f}".format((train_stats["train_loss"][i])), 1, 0, "C")
pdf.cell(30, 8, "{:.3f}".format((train_stats["test_loss"][i])), 1, 0, "C")
pdf.cell(30, 8, "{:.3f}".format((train_stats["train_acc"][i])), 1, 0, "C")
pdf.cell(30, 8, "{:.3f}".format((train_stats["test_acc"][i])), 1, 2, "C")
pdf.cell(-130)
pdf.cell(90, 3, "", 0, 2)
pdf.output(
os.path.join(
os.path.dirname(img_path),
"training_summary_{}.pdf".format(model_name.lower()),
),
"F",
) | 4,686 |
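A usage sketch showing the dictionary keys the function actually reads; all values are placeholders and the loss-curve image is assumed to already exist at img_path:
train_stats = {
    "train_loss": [0.9, 0.6, 0.4],
    "test_loss": [1.0, 0.7, 0.5],
    "train_acc": [0.55, 0.70, 0.80],
    "test_acc": [0.50, 0.65, 0.75],
    "total_dur": 312.4,  # seconds
}
hyperparams = {"lr": 0.001, "batch_size": 32, "current_epoch": 3, "exp_name": "demo"}
produce_summary_pdf("demo_model", "runs/demo/loss_curve.png", hyperparams,
                    model_arch=None, train_stats=train_stats)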
def test_homodyne_mode_kwargs():
"""Test that S gates and Homodyne mesurements are applied to the correct modes via the `modes` kwarg.
Here the initial state is a "diagonal" (angle=pi/2) squeezed state in mode 0
and a "vertical" (angle=0) squeezed state in mode 1.
Because the modes are separable, measuring in one mode should leave the state in the
other mode unchanged.
"""
S1 = Sgate(modes=[0], r=1, phi=np.pi / 2)
S2 = Sgate(modes=[1], r=1, phi=0)
initial_state = Vacuum(2) >> S1 >> S2
final_state = initial_state << Homodyne(modes=[1], quadrature_angle=0, result=[0.3])
expected_state = Vacuum(1) >> S1
assert np.allclose(final_state.dm(), expected_state.dm()) | 4,687 |
def retrieve_zoom_metadata(
stage=None, zoom_api=None, file_key=None, log=None, **attributes
):
"""General function to retrieve metadata from various Zoom endpoints."""
if "id" in attributes:
api_response = zoom_api(id=attributes["id"])
elif "meeting_id" in attributes:
api_response = zoom_api(meeting_id=attributes["meeting_id"])
log.debug(
stage,
reason="Received Zoom",
response=api_response,
response_content=api_response.content,
)
api_content = json.loads(api_response.content)
if not api_response.ok:
reason = api_content["message"] if "message" in api_content else "unknown"
log.error(stage, reason=reason, response=api_response.content)
raise RuntimeError(f"Retrieve Zoom meeting details failed: {reason}")
if file_key:
s3_object = s3.Object(RECORDINGS_BUCKET, file_key)
response = s3_object.put(
Body=json.dumps(api_content), ContentType="application/json"
)
log.debug(stage, reason="Put meeting details", response=response)
log.info(stage, reason="Meeting details", details=api_content)
return api_content | 4,688 |
def find_max_value(binary_tree):
    """This function takes a binary tree and returns the largest value of all the nodes in that tree
    in O(N) time and O(N) space using breadth first traversal while keeping track of the largest value thus far
    in the traversal
    """
    root_node = [binary_tree.root]
    output = []
    largest_value = [0]  # wrapped in a list so the nested function can update it
    # helper function
    def is_null(current_value):
        """this is a helper function to check whether a node produced by the breadth first traversal is None,
        which means we have gone off the bottom depth of the tree; returns a boolean"""
        return current_value is None
    def _walk(input_list):
        """This is the recursive function in our breadth first traversal which implements a queue without the queue class;
        it visits every node of the current level, queues the children, and recurses; the base case is when all
        queued children are None, which means we have gone off the bottom depth of the tree
        """
        counter = 0
        new_nodes = []
        while counter < len(input_list):
            node = input_list[counter]
            if node:
                if node.value > largest_value[0]:
                    largest_value[0] = node.value
                    print('new value: ', node.value)
                output.append(node.value)
                new_nodes.append(node.left)
                new_nodes.append(node.right)
            counter += 1
        print('new_nodes: ', len(new_nodes), '\n', new_nodes)
        if new_nodes and not all(is_null(node) for node in new_nodes):
            _walk(new_nodes)
    _walk(root_node)
    return 'largest value ' + str(largest_value[0]) | 4689
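A tiny smoke test under the assumption that the tree exposes `.root` and each node exposes `.value`, `.left` and `.right` (the only attributes the traversal touches):
class _Node:
    def __init__(self, value, left=None, right=None):
        self.value, self.left, self.right = value, left, right

class _Tree:
    def __init__(self, root):
        self.root = root

tree = _Tree(_Node(5, _Node(12, _Node(3), _Node(8)), _Node(7)))
print(find_max_value(tree))  # -> largest value 12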
def failed_jobs(username, root_wf_id, wf_id):
"""
Get a list of all failed jobs of the latest instance for a given workflow.
"""
dashboard = Dashboard(g.master_db_url, root_wf_id, wf_id)
args = __get_datatables_args()
total_count, filtered_count, failed_jobs_list = dashboard.get_failed_jobs(
wf_id, **args
)
for job in failed_jobs_list:
job.exec_job_id = '<a href="' + url_for(
'.job',
root_wf_id=root_wf_id,
wf_id=wf_id,
job_id=job.job_id,
job_instance_id=job.job_instance_id
) + '">' + job.exec_job_id + '</a>'
job.stdout = '<a target="_blank" href="' + url_for(
'.stdout',
root_wf_id=root_wf_id,
wf_id=wf_id,
job_id=job.job_id,
job_instance_id=job.job_instance_id
) + '">Application Stdout/Stderr</a>'
job.stderr = '<a target="_blank" href="' + url_for(
'.stderr',
root_wf_id=root_wf_id,
wf_id=wf_id,
job_id=job.job_id,
job_instance_id=job.job_instance_id
) + '">Condor Stderr/Pegasus Lite Log</a>'
return render_template(
'workflow/jobs_failed.xhr.json',
count=total_count,
filtered=filtered_count,
jobs=failed_jobs_list,
table_args=args
) | 4,690 |
def test_action_in_alfred(alfred4):
"""Action."""
paths = ["~/Documents", "~/Desktop"]
script = (
'Application("com.runningwithcrayons.Alfred")'
'.action(["~/Documents", "~/Desktop"]);'
)
cmd = ["/usr/bin/osascript", "-l", "JavaScript", "-e", script]
with MockCall() as m:
action_in_alfred(paths)
assert m.cmd == cmd | 4,691 |
def get_vm_types(resources):
"""
Get all vm_types for a list of heat resources, do note that
some of the values retrieved may be invalid
"""
vm_types = []
for v in resources.values():
vm_types.extend(list(get_vm_types_for_resource(v)))
return set(vm_types) | 4,692 |
def plot_all_strings(args, plot_frequency=1, start_iteration=1, end_iteration=None, rescale=False, legend=True,
twoD=False,
plot_restrained=False, cv_indices=None, plot_convergence=True, show_reference_structures=True):  # flag renamed so it does not shadow the plot_reference_structures helper
"""Find all string-paths and plot them"""
runner = rs.StringIterationRunner(args)
runner.cvs = np.array(runner.cvs)
if cv_indices is None:
cv_indices = [i for i in range(runner.stringpath.shape[1])]
plt.title("ALL STRINGS")
if show_reference_structures:
    plot_reference_structures(runner, rescale=rescale, twoD=twoD)
last = None
convergences = []
for i in range(start_iteration, 2000 if end_iteration is None else end_iteration):
try:
runner.init_iteration(i)
path = runner.stringpath[:, cv_indices]
# path = np.loadtxt(runner.working_dir + runner.string_filepath % i)
if last is not None:
if len(last) == len(path):
dist = np.linalg.norm(last - path)
convergence = dist / np.linalg.norm(path)
logger.info("Converge between iterations %s and %s: %s. Absolute distance: %s", i - 1, i,
convergence,
dist)
convergences.append(convergence)
else:
logger.warn("Number points differ between iterations %s and %s", i, i - 1)
convergences.append(np.nan)
if (i + start_iteration - 1) % plot_frequency == 0:
plotpath = colvars.rescale_evals(path, runner.cvs[cv_indices]) if rescale else path
utils.plot_path(plotpath, label="Stringpath %s" % i, text=None, legend=legend, twoD=twoD,
axis_labels=[get_cv_description(cv.id, use_simpler_names=True) for cv in
np.array(runner.cvs)[cv_indices]])
if plot_restrained:
restrainedpath = SingleIterationPostProcessor(runner).compute_string_from_restrained()
restrainedpath = colvars.rescale_evals(restrainedpath, runner.cvs) if rescale else restrainedpath
utils.plot_path(restrainedpath, label="Restrained {}".format(i), twoD=twoD)
plt.grid()
# utils.plot_path(plotpath, label="Stringpath %s" % i, text=None, legend=legend)
last = path
except IOError as err:
tb = traceback.format_exc()
logger.error(tb)
logger.info("Did not find string %s in filepath %s. Not looking for sequential strings", i,
runner.string_filepath)
break
if last is None:
return
if legend:
plt.legend()
plt.show()
if plot_convergence:
plt.plot(convergences)
plt.ylabel(r'$|\bar{s_i}-\bar{s}_{i+1}|/|\bar{s}_{i+1}|$')
plt.xlabel(r"i")
plt.title("Convergence")
plt.show() | 4,693 |
def list_files(directory, suffix='.nc'):
"""
Return a list of all the files with the specified suffix in the submission
directory structure and sub-directories.
:param str directory: The root directory of the submission
:param str suffix: The suffix of the files of interest
:returns: A list of absolute filepaths
"""
nc_files = []
dir_files = os.listdir(directory)
for filename in dir_files:
file_path = os.path.join(directory, filename)
if os.path.isdir(file_path):
nc_files.extend(list_files(file_path, suffix))
elif file_path.endswith(suffix):
nc_files.append(file_path)
return nc_files | 4,694 |
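For example, with a hypothetical submission directory:
nc_files = list_files('/data/submissions/round-1')           # all .nc files, recursively
txt_files = list_files('/data/submissions/round-1', '.txt')  # the suffix is passed down into sub-directories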
def interface_names(obj):
"""
Return: a list of interface names to which `obj' is conformant.
The list begins with `obj' itself if it is an interface.
Names are returned in depth-first order, left to right.
"""
return [o.__name__ for o in interfaces(obj)] | 4,695 |
def add_basic(token):
"""For use with Authorization headers, add "Basic "."""
if token:
return (u"Basic " if isinstance(token, six.text_type) else b"Basic ") + token
else:
return token | 4,696 |
def plot_convergence(x,y):
"""
Visualize the convergence of the sensitivity indices
takes two arguments : x,y input and model output samples
return plot of sensitivity indices wrt number of samples
"""
try:
ninput = x.shape[1]
except (ValueError, IndexError):
ninput = x.size
try:
noutput = y.shape[1]
except (ValueError, IndexError):
noutput = y.size
nsamples = x.shape[0]
trials = (nsamples-30)//10
all_si_c = np.zeros((trials, ninput)) # ninput
for i in range(30,nsamples,10):
modProblem = {
'num_vars': ninput,
'names': inputLabels,
'bounds': bounds
}
# all_si_c[(i-30)//10,:] = rbdfast(y[:i],x=x[:i,:])[1]
res = rbd_fast.analyze(modProblem, x[:i], y[:i], M=10)
all_si_c[((i-30)//10)-1, ] = res['S1']
plt.plot([all_si_c[i].mean() for i in range(trials)])
# for i in range(trials):
# plt.plot(all_si_c[i].mean(),'k-')
plt.show()
return | 4,697 |
def updateUser(token, leaderboard=None, showUsername=None, username=None):
"""
Update user account information.
Parameters-
token: Authentication token.
leaderboard: True to show user's profit on leaderboard.
showUsername: True to show the username on LN Markets public data.
username: username to display.
"""
headers = {
'content-type': "application/json",
'accept': "application/json",
'authorization': f"Bearer {token}",
}
payloadDict = dict()
if showUsername is not None:
payloadDict['show_username'] = showUsername
if leaderboard is not None:
payloadDict['show_leaderboard'] = leaderboard
if username is not None:
payloadDict['username'] = username
payload = json.dumps(payloadDict)
userInfo = requests.put(
APIUrls.lnapi+APIUrls.userUrl,
data=payload,
headers=headers,
)
if userInfo.status_code == 200:
return userInfo.json()
else:
raise RuntimeError(
'Unable to update user information:\n'
f'{userInfo.text}'
) | 4,698 |
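A hypothetical call updating only the public-facing fields; the token is assumed to come from the LN Markets authentication flow:
profile = updateUser(token, leaderboard=True, showUsername=True, username="satoshi_fan")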
def test_plot_segments(sersic_2d_image, segm_and_cat):
"""Test segment plotting functions"""
cat, segm, segm_deblend = segm_and_cat
plot_segments(segm, vmax=1, vmin=0)
plot_segments(segm_deblend, vmax=1, vmin=0)
plot_segment_residual(segm, sersic_2d_image.data, vmax=1, vmin=0) | 4,699 |