| Unnamed: 0 (int64, 0-389k) | code (string, lengths 26-79.6k) | docstring (string, lengths 1-46.9k) |
---|---|---|
5,300 | def main():
desc =
parser = argparse.ArgumentParser(description=desc, prog=)
parser.add_argument(, metavar=,
choices=[, , , ],
help=)
parser.add_argument(, metavar=,
choices=[, , , ],
help=)
desc =
desc +=
parser.add_argument(, metavar=, help=desc)
parser.add_argument(, dest=, default=0, metavar=,
type=float, help=)
parser.add_argument(, dest=, metavar=, type=float,
default=0,
help=)
parser.add_argument(, , dest=, metavar=,
type=argparse.FileType(), default=STDIN,
help=)
parser.add_argument(, , dest=, metavar=,
type=argparse.FileType(), default=STDOUT,
help=)
args = parser.parse_args()
array = np.loadtxt(args.file_in, ndmin=2)
if in [args.source, args.dest] and len(args.date) < 14:
desc =
raise ValueError(desc)
if 9 <= len(args.date) <= 13:
desc = + \
[:len(args.date)]
raise ValueError(desc)
datetime = dt.datetime.strptime(args.date,
[:len(args.date)-2])
A = apexpy.Apex(date=datetime, refh=args.refh)
lats, lons = A.convert(array[:, 0], array[:, 1], args.source, args.dest,
args.height, datetime=datetime)
np.savetxt(args.file_out, np.column_stack((lats, lons)), fmt=) | Entry point for the script |
5,301 | def get_and_alter(self, function):
check_not_none(function, "function can't be None")
return self._encode_invoke(atomic_reference_get_and_alter_codec, function=self._to_data(function)) | Alters the currently stored reference by applying a function on it and gets the old value.
:param function: (Function), A stateful serializable object which represents the Function defined on
server side.
This object must have a serializable Function counterpart registered on the server side with the actual
``org.hazelcast.core.IFunction`` implementation.
:return: (object), the old value, the value before the function is applied. |
5,302 | def from_weight_map(cls, pixel_scale, weight_map):
np.seterr(divide='ignore')
noise_map = 1.0 / np.sqrt(weight_map)
noise_map[noise_map == np.inf] = 1.0e8
return NoiseMap(array=noise_map, pixel_scale=pixel_scale) | Setup the noise-map from a weight map, which is a form of noise-map that comes via HST image-reduction and \
the software package MultiDrizzle.
The variance in each pixel is computed as:
Variance = 1.0 / sqrt(weight_map).
The weight map may contain zeros, in which case the variances are converted to large values to omit them from \
the analysis.
Parameters
-----------
pixel_scale : float
The size of each pixel in arc seconds.
weight_map : ndarray
The weight-value of each pixel which is converted to a variance. |
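The conversion above can be reproduced with plain NumPy; the sketch below drops the class wrapper and keeps the 1.0e8 ceiling for zero-weight pixels (illustrative, not the package's API).

```python
import numpy as np

def noise_from_weight_map(weight_map):
    """Convert a weight map to a noise map, capping zero-weight pixels at 1.0e8."""
    with np.errstate(divide="ignore"):        # weight of 0 gives inf, replaced below
        noise_map = 1.0 / np.sqrt(weight_map)
    noise_map[np.isinf(noise_map)] = 1.0e8    # effectively exclude zero-weight pixels
    return noise_map

weights = np.array([[4.0, 0.0], [1.0, 16.0]])
print(noise_from_weight_map(weights))
# [[5.0e-01 1.0e+08]
#  [1.0e+00 2.5e-01]]
```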
5,303 | def validate(self):
if self.job_record is None:
self.tree.timetable.assign_job_record(self)
next_timeperiod = time_helper.increment_timeperiod(self.time_qualifier, self.timeperiod)
has_younger_sibling = next_timeperiod in self.parent.children
all_children_skipped = True
all_children_finished = True
for timeperiod, child in self.children.items():
child.validate()
if child.job_record.is_active:
all_children_finished = False
if not child.job_record.is_skipped:
all_children_skipped = False
if all_children_finished is False and self.job_record.is_finished:
self.tree.timetable.reprocess_tree_node(self)
if len(self.children) != 0 \
and all_children_skipped \
and self.tree.build_timeperiod is not None \
and has_younger_sibling is True \
and not self.job_record.is_skipped:
self.tree.timetable.skip_tree_node(self) | Method traverses the tree and performs the following activities:
* requests a job record in STATE_EMBRYO if no job record is currently assigned to the node
* requests nodes for reprocessing, if STATE_PROCESSED node relies on unfinalized nodes
* requests the node for skipping if it is a daily node and all 24 of its hourly nodes are in STATE_SKIPPED state |
5,304 | def find(self, resource, req, sub_resource_lookup):
args = getattr(req, , request.args if request else {}) or {}
source_config = config.SOURCES[resource]
if args.get():
query = json.loads(args.get())
if not in query.get(, {}):
_query = query.get()
query[] = {: {}}
if _query:
query[][][] = _query
else:
query = {: {: {}}}
if args.get(, None):
query[][][] = _build_query_string(args.get(),
default_field=args.get(, ),
default_operator=args.get(, ))
if not in query:
if req.sort:
sort = ast.literal_eval(req.sort)
set_sort(query, sort)
elif self._default_sort(resource) and not in query[][]:
set_sort(query, self._default_sort(resource))
if req.max_results:
query.setdefault(, req.max_results)
if req.page > 1:
query.setdefault(, (req.page - 1) * req.max_results)
filters = []
filters.append(source_config.get())
filters.append(source_config.get(, noop)())
filters.append({: _build_lookup_filter(sub_resource_lookup)} if sub_resource_lookup else None)
filters.append(json.loads(args.get()) if in args else None)
filters.extend(args.get() if in args else [])
if req.where:
try:
filters.append({: json.loads(req.where)})
except ValueError:
try:
filters.append({: parse(req.where)})
except ParseError:
abort(400)
set_filters(query, filters)
if in source_config:
query[] = source_config[]
if in source_config and self.should_aggregate(req):
query[] = source_config[]
if in source_config and self.should_highlight(req):
query_string = query[].get(, {}).get(, {}).get()
highlights = source_config.get(, noop)(query_string)
if highlights:
query[] = highlights
query[].setdefault(, False)
source_projections = None
if self.should_project(req):
source_projections = self.get_projected_fields(req)
args = self._es_args(resource, source_projections=source_projections)
try:
hits = self.elastic(resource).search(body=query, **args)
except elasticsearch.exceptions.RequestError as e:
if e.status_code == 400 and "No mapping found for" in e.error:
hits = {}
elif e.status_code == 400 and in e.error:
raise InvalidSearchString
else:
raise
return self._parse_hits(hits, resource) | Find documents for resource. |
5,305 | def solveConsAggShock(solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,PermGroFac,
PermGroFacAgg,aXtraGrid,BoroCnstArt,Mgrid,AFunc,Rfunc,wFunc,DeprFac):
kNextEff_array = kNext_array/TranShkAggValsNext_tiled
R_array = Rfunc(kNextEff_array)
Reff_array = R_array/LivPrb
wEff_array = wFunc(kNextEff_array)*TranShkAggValsNext_tiled
PermShkTotal_array = PermGroFac*PermGroFacAgg*PermShkValsNext_tiled*PermShkAggValsNext_tiled
Mnext_array = kNext_array*R_array + wEff_array
aNrmMin_candidates = PermGroFac*PermGroFacAgg*PermShkValsNext_tiled[:,0,:]*PermShkAggValsNext_tiled[:,0,:]/Reff_array[:,0,:]*\
(mNrmMinNext(Mnext_array[:,0,:]) - wEff_array[:,0,:]*TranShkValsNext_tiled[:,0,:])
aNrmMin_vec = np.max(aNrmMin_candidates,axis=1)
BoroCnstNat_vec = aNrmMin_vec
aNrmMin_tiled = np.tile(np.reshape(aNrmMin_vec,(Mcount,1,1)),(1,aCount,ShkCount))
aNrmNow_tiled = aNrmMin_tiled + aXtra_tiled
mNrmNext_array = Reff_array*aNrmNow_tiled/PermShkTotal_array + TranShkValsNext_tiled*wEff_array
vPnext_array = Reff_array*PermShkTotal_array**(-CRRA)*vPfuncNext(mNrmNext_array,Mnext_array)
EndOfPrdvP = DiscFac*LivPrb*np.sum(vPnext_array*ShkPrbsNext_tiled,axis=2)
cNrmNow = EndOfPrdvP**(-1.0/CRRA)
mNrmNow = aNrmNow_tiled[:,:,0] + cNrmNow
cFuncBaseByM_list = []
for j in range(Mcount):
c_temp = np.insert(cNrmNow[j,:],0,0.0)
m_temp = np.insert(mNrmNow[j,:] - BoroCnstNat_vec[j],0,0.0)
cFuncBaseByM_list.append(LinearInterp(m_temp,c_temp))
BoroCnstNat = LinearInterp(np.insert(Mgrid,0,0.0),np.insert(BoroCnstNat_vec,0,0.0))
cFuncBase = LinearInterpOnInterp1D(cFuncBaseByM_list,Mgrid)
cFuncUnc = VariableLowerBoundFunc2D(cFuncBase,BoroCnstNat)
cFuncCnst = BilinearInterp(np.array([[0.0,0.0],[1.0,1.0]]),
np.array([BoroCnstArt,BoroCnstArt+1.0]),np.array([0.0,1.0]))
cFuncNow = LowerEnvelope2D(cFuncUnc,cFuncCnst)
mNrmMinNow = UpperEnvelope(BoroCnstNat,ConstantFunction(BoroCnstArt))
vPfuncNow = MargValueFunc2D(cFuncNow,CRRA)
solution_now = ConsumerSolution(cFunc=cFuncNow,vPfunc=vPfuncNow,mNrmMin=mNrmMinNow)
return solution_now | Solve one period of a consumption-saving problem with idiosyncratic and
aggregate shocks (transitory and permanent). This is a basic solver that
can't handle cubic splines, nor can it calculate a value function.
Parameters
----------
solution_next : ConsumerSolution
The solution to the succeeding one period problem.
IncomeDstn : [np.array]
A list containing five arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
probabilities, idiosyncratic permanent shocks, idiosyncratic transitory
shocks, aggregate permanent shocks, aggregate transitory shocks.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
PermGroFac : float
Expected permanent income growth factor at the end of this period.
PermGroFacAgg : float
Expected aggregate productivity growth factor.
aXtraGrid : np.array
Array of "extra" end-of-period asset values-- assets above the
absolute minimum acceptable level.
BoroCnstArt : float
Artificial borrowing constraint; minimum allowable end-of-period asset-to-
permanent-income ratio. Unlike other models, this *can't* be None.
Mgrid : np.array
A grid of aggregate market resources to permanent income in the economy.
AFunc : function
Aggregate savings as a function of aggregate market resources.
Rfunc : function
The net interest factor on assets as a function of capital ratio k.
wFunc : function
The wage rate for labor as a function of capital-to-labor ratio k.
DeprFac : float
Capital Depreciation Rate
Returns
-------
solution_now : ConsumerSolution
The solution to the single period consumption-saving problem. Includes
a consumption function cFunc (linear interpolation over linear interpolations) and marginal value function vPfunc. |
5,306 | def get_position_d(self):
data = []
data.append(0x09)
data.append(self.servoid)
data.append(RAM_READ_REQ)
data.append(POSITION_KD_RAM)
data.append(BYTE2)
send_data(data)
rxdata = []
try:
rxdata = SERPORT.read(13)
return (ord(rxdata[10])*256)+(ord(rxdata[9])&0xff)
except HerkulexError:
raise HerkulexError("could not communicate with motors") | Get the D value of the current PID for position |
5,307 | def discardID(self, idVal):
for i in self:
if i.id == idVal:
self._collection.discard(i)
return | Checks if the collected items contain the given _idVal_ and discards it if it is found; will not raise an exception if the item is not found
# Parameters
_idVal_ : `str`
> The discarded id string |
5,308 | def get_alignment_df(a_aln_seq, b_aln_seq, a_seq_id=None, b_seq_id=None):
if len(a_aln_seq) != len(b_aln_seq):
raise ValueError()
if not a_seq_id:
a_seq_id =
if not b_seq_id:
b_seq_id =
a_aln_seq = ssbio.protein.sequence.utils.cast_to_str(a_aln_seq)
b_aln_seq = ssbio.protein.sequence.utils.cast_to_str(b_aln_seq)
a_idx = 1
b_idx = 1
appender = []
for i, (a,b) in enumerate(zip(a_aln_seq, b_aln_seq)):
to_append = {}
if a == b and a != and b != :
aa_flag =
elif a != b and a == and b != :
aa_flag =
elif a != b and a != and b == :
aa_flag =
elif a != b and a != and b == :
aa_flag =
elif a != b and b != and a == :
aa_flag =
elif a != b and a != and b != :
aa_flag =
to_append[] = a_seq_id
to_append[] = b_seq_id
to_append[] = aa_flag
if aa_flag == or aa_flag == or aa_flag == :
to_append[] = a
to_append[] = int(a_idx)
to_append[] = b
to_append[] = int(b_idx)
a_idx += 1
b_idx += 1
if aa_flag == :
to_append[] = a
to_append[] = int(a_idx)
a_idx += 1
if aa_flag == :
to_append[] = b
to_append[] = int(b_idx)
b_idx += 1
appender.append(to_append)
cols = [, , , , , , ]
alignment_df = pd.DataFrame.from_records(appender, columns=cols)
alignment_df = alignment_df.fillna(value=np.nan)
return alignment_df | Summarize two alignment strings in a dataframe.
Args:
a_aln_seq (str): Aligned sequence string
b_aln_seq (str): Aligned sequence string
a_seq_id (str): Optional ID of a_seq
b_seq_id (str): Optional ID of b_aln_seq
Returns:
DataFrame: a per-residue level annotation of the alignment |
5,309 | def _load_values(self, db_key: str) -> dict:
if self._db.type(db_key) == :
db_values = self._db.lrange(db_key, 0, -1)
for i, value in enumerate(db_values):
try:
db_values[i] = ast.literal_eval(value)
except SyntaxError:
pass
except ValueError:
pass
else:
db_values = self._db.hgetall(db_key)
for _key, _value in db_values.items():
try:
db_values[_key] = ast.literal_eval(_value)
except SyntaxError:
pass
except ValueError:
pass
return db_values | Load values from the db at the specified key, db_key.
FIXME(BMo): Could also be extended to load scalar types (instead of
just list and hash) |
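The coercion step in isolation: every string read back from the store is passed through `ast.literal_eval` and kept as-is when it is not a Python literal (the Redis client itself is assumed and omitted here).

```python
import ast

def coerce_values(raw):
    """Best-effort conversion of stored strings back into Python literals."""
    out = {}
    for key, value in raw.items():
        try:
            out[key] = ast.literal_eval(value)   # "[1, 2]" -> [1, 2], "3.5" -> 3.5
        except (SyntaxError, ValueError):
            out[key] = value                     # plain strings stay untouched
    return out

print(coerce_values({"a": "[1, 2]", "b": "3.5", "c": "hello"}))
# {'a': [1, 2], 'b': 3.5, 'c': 'hello'}
```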
5,310 | def addCases(self, tupesValStmnts):
s = self
for val, statements in tupesValStmnts:
s = s.Case(val, statements)
return s | Add multiple case statements from an iterable of tuples
(caseVal, statements) |
5,311 | def search_playlist(self, playlist_name, quiet=False, limit=9):
result = self.search(playlist_name, search_type=1000, limit=limit)
if result[][] <= 0:
LOG.warning(, playlist_name)
raise SearchNotFound(.format(playlist_name))
else:
playlists = result[][]
if quiet:
playlist_id, playlist_name = playlists[0][], playlists[0][]
playlist = Playlist(playlist_id, playlist_name)
return playlist
else:
return self.display.select_one_playlist(playlists) | Search playlist by playlist name.
:params playlist_name: playlist name.
:params quiet: automatically select the best one.
:params limit: playlist count returned by weapi.
:return: a Playlist object. |
5,312 | def groupby(self, dimensions=None, container_type=None, group_type=None, **kwargs):
if dimensions is None:
dimensions = self.kdims
if not isinstance(dimensions, (list, tuple)):
dimensions = [dimensions]
container_type = container_type if container_type else type(self)
group_type = group_type if group_type else type(self)
outer_kdims = [self.get_dimension(d) for d in dimensions]
inner_kdims = [d for d in self.kdims if not d in outer_kdims]
outer_dynamic = issubclass(container_type, DynamicMap)
inner_dynamic = issubclass(group_type, DynamicMap)
if ((not outer_dynamic and any(not d.values for d in outer_kdims)) or
(not inner_dynamic and any(not d.values for d in inner_kdims))):
raise Exception(
)
if outer_dynamic:
def outer_fn(*outer_key, **dynkwargs):
if inner_dynamic:
def inner_fn(*inner_key, **dynkwargs):
outer_vals = zip(outer_kdims, util.wrap_tuple(outer_key))
inner_vals = zip(inner_kdims, util.wrap_tuple(inner_key))
inner_sel = [(k.name, v) for k, v in inner_vals]
outer_sel = [(k.name, v) for k, v in outer_vals]
return self.select(**dict(inner_sel+outer_sel))
return self.clone([], callback=inner_fn, kdims=inner_kdims)
else:
dim_vals = [(d.name, d.values) for d in inner_kdims]
dim_vals += [(d.name, [v]) for d, v in
zip(outer_kdims, util.wrap_tuple(outer_key))]
with item_check(False):
selected = HoloMap(self.select(**dict(dim_vals)))
return group_type(selected.reindex(inner_kdims))
if outer_kdims:
return self.clone([], callback=outer_fn, kdims=outer_kdims)
else:
return outer_fn(())
else:
outer_product = itertools.product(*[self.get_dimension(d).values
for d in dimensions])
groups = []
for outer in outer_product:
outer_vals = [(d.name, [o]) for d, o in zip(outer_kdims, outer)]
if inner_dynamic or not inner_kdims:
def inner_fn(outer_vals, *key, **dynkwargs):
inner_dims = zip(inner_kdims, util.wrap_tuple(key))
inner_vals = [(d.name, k) for d, k in inner_dims]
return self.select(**dict(outer_vals+inner_vals)).last
if inner_kdims or self.streams:
group = self.clone(callback=partial(inner_fn, outer_vals),
kdims=inner_kdims)
else:
group = inner_fn(outer_vals, ())
groups.append((outer, group))
else:
inner_vals = [(d.name, self.get_dimension(d).values)
for d in inner_kdims]
with item_check(False):
selected = HoloMap(self.select(**dict(outer_vals+inner_vals)))
group = group_type(selected.reindex(inner_kdims))
groups.append((outer, group))
return container_type(groups, kdims=outer_kdims) | Groups DynamicMap by one or more dimensions
Applies groupby operation over the specified dimensions
returning an object of type container_type (expected to be
dictionary-like) containing the groups.
Args:
dimensions: Dimension(s) to group by
container_type: Type to cast group container to
group_type: Type to cast each group to
dynamic: Whether to return a DynamicMap
**kwargs: Keyword arguments to pass to each group
Returns:
Returns object of supplied container_type containing the
groups. If dynamic=True returns a DynamicMap instead. |
5,313 | def trigger(self, event, filter=None, update=None, documents=None, ids=None, replacements=None):
if not self.has_trigger(event):
return
if documents is not None:
pass
elif ids is not None:
documents = self.find_by_ids(ids, read_use="primary")
elif filter is not None:
documents = self.find(filter, read_use="primary")
else:
raise Exception("Trigger couldn't filter documents")
for doc in documents:
getattr(doc, event)(update=update, replacements=replacements) | Trigger the after_save hook on documents, if present. |
5,314 | def Process(self, path):
path = re.sub(self.SYSTEMROOT_RE, r"%systemroot%", path, count=1)
path = re.sub(self.SYSTEM32_RE, r"%systemroot%\\system32", path, count=1)
matches_iter = self.WIN_ENVIRON_REGEX.finditer(path)
var_names = set(m.group(1).lower() for m in matches_iter)
results = [path]
for var_name in var_names:
try:
var_regex, var_value = self.vars_map[var_name]
except KeyError:
continue
if isinstance(var_value, string_types):
replacements = [var_value]
else:
replacements = var_value
processed_results = []
for result in results:
for repl in replacements:
processed_results.append(var_regex.sub(lambda _: repl, result))
results = processed_results
return results | Processes a given path.
Args:
path: Path (as a string) to post-process.
Returns:
A list of paths with environment variables replaced with their
values. If the mapping had a list of values for a particular variable,
instead of just one value, then all possible replacements will be
returned. |
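A standalone illustration of the expansion logic: one regex finds `%var%` tokens and list-valued variables multiply the number of results. The regex and the variables map below are stand-ins, not the originals from the snippet above.

```python
import re

WIN_ENVIRON_REGEX = re.compile(r"%([^%]+?)%")

def expand(path, vars_map):
    """Expand %var% tokens; a list value produces one result per element."""
    results = [path]
    for var_name in {m.group(1).lower() for m in WIN_ENVIRON_REGEX.finditer(path)}:
        values = vars_map.get(var_name)
        if values is None:
            continue                              # unknown variables are left untouched
        if isinstance(values, str):
            values = [values]
        pattern = re.compile("%" + re.escape(var_name) + "%", re.IGNORECASE)
        results = [pattern.sub(lambda _: v, r) for r in results for v in values]
    return results

print(expand(r"%systemdrive%\Users\%user%", {"systemdrive": "C:", "user": ["alice", "bob"]}))
# ['C:\\Users\\alice', 'C:\\Users\\bob']
```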
5,315 | def _set_af_vrf(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("af_vrf_name",af_vrf.af_vrf, yang_name="af-vrf", rest_name="vrf", parent=self, is_container=, user_ordered=False, path_helper=self._path_helper, yang_keys=, extensions={u: {u: u, u: u, u: None, u: u, u: None, u: None, u: u}}), is_container=, yang_name="af-vrf", rest_name="vrf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: u, u: None, u: u, u: None, u: None, u: u}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "list",
: ,
})
self.__af_vrf = t
if hasattr(self, ):
self._set() | Setter method for af_vrf, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_af_vrf is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_af_vrf() directly. |
5,316 | def create_filename_parser(self, base_dir):
if base_dir and self.file_pattern:
file_pattern = os.path.join(base_dir, self.file_pattern)
else:
file_pattern = self.file_pattern
return parser.Parser(file_pattern) if file_pattern else None | Create a :class:`trollsift.parser.Parser` object for later use. |
5,317 | def formatTextException(*args):
format = lambda x: re.sub(r"^(\s+)", lambda y: "{0} ".format("." * len(y.group(0))), x.rstrip().expandtabs(4))
verbose = 10
cls, instance, trcback = args
stack = foundations.exceptions.extract_stack(foundations.exceptions.get_inner_most_frame(trcback), verbose)
text = []
text.append(foundations.strings.to_string(cls))
text.append("")
for line in foundations.exceptions.format_exception(cls, instance, trcback):
text.append(format("{0}".format(format(line))))
text.append("")
text.append("An unhandled exception occured in {0} {1}!".format(Constants.application_name,
Constants.version))
text.append("Sequence of calls leading up to the exception, in their occurring order:")
text.append("")
for frame, file_name, line_number, name, context, index in stack:
location = "{0}{1}".format(name if name != "<module>" else "",
inspect.formatargvalues(*inspect.getargvalues(frame)))
text.append("File \"{0}\", line {1}, in {2}".format(file_name, line_number, location))
for i, line in enumerate(context):
if i == index:
text.append(format("\t{0} {1} <===".format(line_number - index + i, format(format(line)))))
else:
text.append(format("\t{0} {1}".format(line_number - index + i, format(format(line)))))
text.append("")
for line in traceback.format_exception_only(cls, instance):
text.append("{0}".format(format(line)))
text.append("")
text.append("Frames locals by stack ordering, innermost last:")
text.append("")
for frame, locals in foundations.exceptions.extract_locals(trcback):
name, file_name, line_number = frame
text.append("Frame \"{0}\" in \"{1}\" file, line {2}:".format(name, file_name, line_number))
arguments, nameless_args, keyword_args, locals = locals
has_arguments, has_locals = any((arguments, nameless_args, keyword_args)), any(locals)
has_arguments and text.append(format("\tArguments:"))
for key, value in arguments.iteritems():
text.append(format("\t\t{0} = {1}".format(key, value)))
for value in nameless_args:
text.append(format("\t\t{0}".format(value)))
for key, value in sorted(keyword_args.iteritems()):
text.append(format("\\tt{0} = {1}".format(key, value)))
has_locals and text.append(format("\tLocals:"))
for key, value in sorted(locals.iteritems()):
text.append(format("\t\t{0} = {1}".format(key, value)))
text.append("")
return text | Formats given exception as a text.
:param \*args: Arguments.
:type \*args: \*
:return: Exception text.
:rtype: unicode |
5,318 | def get_vlan_brief_output_vlan_vlan_name(self, **kwargs):
config = ET.Element("config")
get_vlan_brief = ET.Element("get_vlan_brief")
config = get_vlan_brief
output = ET.SubElement(get_vlan_brief, "output")
vlan = ET.SubElement(output, "vlan")
vlan_id_key = ET.SubElement(vlan, "vlan-id")
vlan_id_key.text = kwargs.pop()
vlan_name = ET.SubElement(vlan, "vlan-name")
vlan_name.text = kwargs.pop()
callback = kwargs.pop(, self._callback)
return callback(config) | Auto Generated Code |
5,319 | def _get_ip_address(self, request):
ipaddr = request.META.get("HTTP_X_FORWARDED_FOR", None)
if ipaddr:
return ipaddr.split(",")[0].strip()
return request.META.get("REMOTE_ADDR", "") | Get the remote ip address the request was generated from. |
5,320 | def init_logging(config):
verbose = config.get("verbose", 3)
enable_loggers = config.get("enable_loggers", [])
if enable_loggers is None:
enable_loggers = []
logger_date_format = config.get("logger_date_format", "%Y-%m-%d %H:%M:%S")
logger_format = config.get(
"logger_format",
"%(asctime)s.%(msecs)03d - <%(thread)d> %(name)-27s %(levelname)-8s: %(message)s",
)
formatter = logging.Formatter(logger_format, logger_date_format)
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(formatter)
logger = logging.getLogger(BASE_LOGGER_NAME)
if verbose >= 4:
logger.setLevel(logging.DEBUG)
elif verbose == 3:
logger.setLevel(logging.INFO)
elif verbose == 2:
logger.setLevel(logging.WARN)
elif verbose == 1:
logger.setLevel(logging.ERROR)
else:
logger.setLevel(logging.CRITICAL)
logger.propagate = False
for hdlr in logger.handlers[:]:
try:
hdlr.flush()
hdlr.close()
except Exception:
pass
logger.removeHandler(hdlr)
logger.addHandler(consoleHandler)
if verbose >= 3:
for e in enable_loggers:
if not e.startswith(BASE_LOGGER_NAME + "."):
e = BASE_LOGGER_NAME + "." + e
lg = logging.getLogger(e.strip())
lg.setLevel(logging.DEBUG) | Initialize base logger named 'wsgidav'.
The base logger is filtered by the `verbose` configuration option.
Log entries will have a time stamp and thread id.
:Parameters:
verbose : int
Verbosity configuration (0..5)
enable_loggers : string list
List of module logger names, that will be switched to DEBUG level.
Module loggers
~~~~~~~~~~~~~~
Module loggers (e.g 'wsgidav.lock_manager') are named loggers, that can be
independently switched to DEBUG mode.
Except for verbosity, they will inherit settings from the base logger.
They will suppress DEBUG level messages, unless they are enabled by passing
their name to util.init_logging().
If enabled, module loggers will print DEBUG messages, even if verbose == 3.
Example initialize and use a module logger, that will generate output,
if enabled (and verbose >= 2)::
_logger = util.get_module_logger(__name__)
[..]
_logger.debug("foo: '{}'".format(s))
This logger would be enabled by passing its name to init_logging()::
enable_loggers = ["lock_manager",
"property_manager",
]
util.init_logging(2, enable_loggers)
Log Level Matrix
~~~~~~~~~~~~~~~~
+---------+--------+---------------------------------------------------------------+
| Verbose | Option | Log level |
| level | +-------------+------------------------+------------------------+
| | | base logger | module logger(default) | module logger(enabled) |
+=========+========+=============+========================+========================+
| 0 | -qqq | CRITICAL | CRITICAL | CRITICAL |
+---------+--------+-------------+------------------------+------------------------+
| 1 | -qq | ERROR | ERROR | ERROR |
+---------+--------+-------------+------------------------+------------------------+
| 2 | -q | WARN | WARN | WARN |
+---------+--------+-------------+------------------------+------------------------+
| 3 | | INFO | INFO | **DEBUG** |
+---------+--------+-------------+------------------------+------------------------+
| 4 | -v | DEBUG | DEBUG | DEBUG |
+---------+--------+-------------+------------------------+------------------------+
| 5 | -vv | DEBUG | DEBUG | DEBUG |
+---------+--------+-------------+------------------------+------------------------+ |
5,321 | def delete_insight(self, project_key, insight_id):
projectOwner, projectId = parse_dataset_key(project_key)
try:
self._insights_api.delete_insight(projectOwner,
projectId,
insight_id)
except _swagger.rest.ApiException as e:
raise RestApiError(cause=e) | Delete an existing insight.
:params project_key: Project identifier, in the form of
projectOwner/projectId
:type project_key: str
:params insight_id: Insight unique id
:type insight_id: str
:raises RestApiException: If a server error occurs
Examples
--------
>>> import datadotworld as dw
>>> api_client = dw.api_client()
>>> del_insight = api_client.delete_insight(
... 'username/project', 'insightid') # doctest: +SKIP |
5,322 | def reset(self):
self.clear()
self._initialise()
self.configspec = None
self._original_configspec = None | Clear ConfigObj instance and restore to 'freshly created' state. |
5,323 | def to_bytes(string):
assert isinstance(string, basestring)
if sys.version_info[0] >= 3:
if isinstance(string, str):
return string.encode()
else:
return string
else:
if isinstance(string, unicode):
return string.encode()
else:
return string | Convert a string (bytes, str or unicode) to bytes. |
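On Python 3 only the `str`/`bytes` distinction remains; a reduced sketch (the Python 2 `unicode` branch is dropped):

```python
def to_bytes(string):
    """Return `string` as bytes, encoding str with the default UTF-8 codec."""
    if isinstance(string, bytes):
        return string
    if isinstance(string, str):
        return string.encode()
    raise TypeError("expected str or bytes, got %r" % type(string))

assert to_bytes("hello") == b"hello"
assert to_bytes(b"raw") == b"raw"
```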
5,324 | def get_list(self, size=100, startIndex=0, searchText="", sortProperty="", sortOrder=, status=):
url = urljoin(BASEURL, "sites", "list")
params = {
: self.token,
: size,
: startIndex,
: sortOrder,
: status
}
if searchText:
params[] = searchText
if sortProperty:
params[] = sortProperty
r = requests.get(url, params)
r.raise_for_status()
return r.json() | Request service locations
Returns
-------
dict |
5,325 | def on_module(self, node):
out = None
for tnode in node.body:
out = self.run(tnode)
return out | Module def. |
5,326 | def overlap(self, spectrum):
swave = self.wave[np.where(self.throughput != 0)]
s1, s2 = swave.min(), swave.max()
owave = spectrum[0]
o1, o2 = owave.min(), owave.max()
if (s1 >= o1 and s2 <= o2):
ans =
elif (s2 < o1) or (o2 < s1):
ans =
else:
ans =
return ans | Tests for overlap of this filter with a spectrum
Example of full overlap:
|---------- spectrum ----------|
|------ self ------|
Examples of partial overlap: :
|---------- self ----------|
|------ spectrum ------|
|---- spectrum ----|
|----- self -----|
|---- self ----|
|---- spectrum ----|
Examples of no overlap: :
|---- spectrum ----| |---- other ----|
|---- other ----| |---- spectrum ----|
Parameters
----------
spectrum: sequence
The [W, F] spectrum with astropy units
Returns
-------
ans : {'full', 'partial', 'none'}
Overlap status. |
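The three-way classification can be exercised with plain NumPy arrays; the wavelength grids below are invented for illustration and carry no units.

```python
import numpy as np

def overlap_status(filter_wave, throughput, spectrum_wave):
    """Classify the overlap of a filter bandpass with a spectrum's wavelength range."""
    swave = filter_wave[throughput != 0]
    s1, s2 = swave.min(), swave.max()
    o1, o2 = spectrum_wave.min(), spectrum_wave.max()
    if s1 >= o1 and s2 <= o2:
        return "full"
    if s2 < o1 or o2 < s1:
        return "none"
    return "partial"

wave = np.linspace(5000, 6000, 11)
thru = np.where((wave > 5200) & (wave < 5800), 1.0, 0.0)
print(overlap_status(wave, thru, np.linspace(4000, 7000, 10)))   # full
print(overlap_status(wave, thru, np.linspace(5500, 7000, 10)))   # partial
print(overlap_status(wave, thru, np.linspace(6500, 7000, 10)))   # none
```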
5,327 | def addTab(self, widget, *args):
widget.dirty_changed.connect(self._on_dirty_changed)
super(CodeEditTabWidget, self).addTab(widget, *args) | Re-implements addTab to connect to the dirty changed signal and setup
some helper attributes.
:param widget: widget to add
:param args: optional addtional arguments (name and/or icon). |
5,328 | def get_value(self, key):
try:
return self.idb.get_value(key)
except (KeyError, EnvironmentError):
pass
try:
return self.directory.get_value(key)
except (KeyError, EnvironmentError):
pass
try:
return self.user.get_value(key)
except KeyError:
pass
try:
return self.system.get_value(key)
except KeyError:
pass
raise KeyError("key not found") | Fetch the settings value with the highest precedence for the given
key, or raise KeyError.
Precedence:
- IDB scope
- directory scope
- user scope
- system scope
type key: basestring
rtype value: Union[basestring, int, float, List, Dict] |
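The precedence chain is a first-hit lookup over ordered scopes; in this sketch the scopes are plain dicts rather than the real settings stores.

```python
def get_value(key, *scopes):
    """Return the value from the first scope that defines `key`, else raise KeyError."""
    for scope in scopes:               # ordered: IDB, directory, user, system
        try:
            return scope[key]
        except KeyError:
            continue
    raise KeyError("key not found")

idb = {}
directory = {"color": "red"}
user = {"color": "blue", "font": "mono"}
system = {"font": "serif"}
print(get_value("color", idb, directory, user, system))   # red  (directory beats user)
print(get_value("font", idb, directory, user, system))    # mono (user beats system)
```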
5,329 | def pixel_width(self):
return self.zoom_factor * ((self._finish - self._start) / self._resolution) | Width of the whole TimeLine in pixels
:rtype: int |
5,330 | def as_dict(self):
drepr = super(PlayMeta, self).as_dict()
drepr["details"] = [meta.as_dict() for meta in self._metas]
return drepr | Pre-serialisation of the meta data |
5,331 | def list_runner_book(self, market_id, selection_id, handicap=None, price_projection=None, order_projection=None,
match_projection=None, include_overall_position=None, partition_matched_by_strategy_ref=None,
customer_strategy_refs=None, currency_code=None, matched_since=None, bet_ids=None, locale=None,
session=None, lightweight=None):
params = clean_locals(locals())
method = % (self.URI, )
(response, elapsed_time) = self.request(method, params, session)
return self.process_response(response, resources.MarketBook, elapsed_time, lightweight) | Returns a list of dynamic data about a market and a specified runner.
Dynamic data includes prices, the status of the market, the status of selections,
the traded volume, and the status of any orders you have placed in the market
:param unicode market_id: The unique id for the market
:param int selection_id: The unique id for the selection in the market
:param double handicap: The handicap associated with the runner, in case of Asian handicap markets
:param dict price_projection: The projection of price data you want to receive in the response
:param str order_projection: The orders you want to receive in the response
:param str match_projection: If you ask for orders, specifies the representation of matches
:param bool include_overall_position: If you ask for orders, returns matches for each selection
:param bool partition_matched_by_strategy_ref: If you ask for orders, returns the breakdown of matches
by strategy for each selection
:param list customer_strategy_refs: If you ask for orders, restricts the results to orders matching
any of the specified set of customer defined strategies
:param str currency_code: A Betfair standard currency code
:param str matched_since: If you ask for orders, restricts the results to orders that have at
least one fragment matched since the specified date
:param list bet_ids: If you ask for orders, restricts the results to orders with the specified bet IDs
:param str locale: The language used for the response
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.MarketBook] |
5,332 | def increase_writes_in_units(
current_provisioning, units, max_provisioned_writes,
consumed_write_units_percent, log_tag):
units = int(units)
current_provisioning = float(current_provisioning)
consumed_write_units_percent = float(consumed_write_units_percent)
consumption_based_current_provisioning = \
int(math.ceil(current_provisioning*(consumed_write_units_percent/100)))
if consumption_based_current_provisioning > current_provisioning:
updated_provisioning = consumption_based_current_provisioning + units
else:
updated_provisioning = int(current_provisioning) + units
if max_provisioned_writes > 0:
if updated_provisioning > max_provisioned_writes:
logger.info(
.format(
log_tag,
max_provisioned_writes))
return max_provisioned_writes
logger.debug(
.format(
log_tag,
int(updated_provisioning)))
return updated_provisioning | Increase the current_provisioning with units units
:type current_provisioning: int
:param current_provisioning: The current provisioning
:type units: int
:param units: How many units should we increase with
:returns: int -- New provisioning value
:type max_provisioned_writes: int
:param max_provisioned_writes: Configured max provisioned writes
:type consumed_write_units_percent: float
:param consumed_write_units_percent: Number of consumed write units
:type log_tag: str
:param log_tag: Prefix for the log |
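The arithmetic without the logging, keeping the same rounding and capping rules (a sketch, not the library function itself):

```python
import math

def increase_writes_in_units(current, units, max_writes, consumed_pct):
    """Add `units` on top of current provisioning or current consumption, then cap."""
    consumption_based = int(math.ceil(current * (consumed_pct / 100.0)))
    base = consumption_based if consumption_based > current else int(current)
    updated = base + int(units)
    if max_writes > 0 and updated > max_writes:
        return max_writes              # never exceed the configured ceiling
    return updated

print(increase_writes_in_units(100, 50, 0, 80))     # 150: consumption below current
print(increase_writes_in_units(100, 50, 0, 130))    # 180: consumption above current
print(increase_writes_in_units(100, 50, 120, 130))  # 120: capped at max_provisioned_writes
```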
5,333 | def write(self):
return \
\
.format(self.qname,
str(self.flag),
self.rname,
str(self.pos),
str(self.mapq),
self.cigar,
self.rnext,
str(self.pnext),
str(self.tlen),
self.seq,
self.qual,
os.linesep) | Return SAM formatted string
Returns:
str: SAM formatted string containing entire SAM entry |
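The method boils down to joining the eleven mandatory SAM fields with tabs; the record below is invented for illustration.

```python
import os

def sam_line(qname, flag, rname, pos, mapq, cigar, rnext, pnext, tlen, seq, qual):
    """Return one tab-separated SAM alignment line terminated with os.linesep."""
    fields = (qname, str(flag), rname, str(pos), str(mapq),
              cigar, rnext, str(pnext), str(tlen), seq, qual)
    return "\t".join(fields) + os.linesep

line = sam_line("read1", 0, "chr1", 100, 60, "8M", "*", 0, 0, "ACGTACGT", "IIIIIIII")
print(line, end="")   # read1<TAB>0<TAB>chr1<TAB>100<TAB>60<TAB>8M<TAB>*<TAB>0<TAB>0<TAB>ACGTACGT<TAB>IIIIIIII
```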
5,334 | def tune(runner, kernel_options, device_options, tuning_options):
results = []
cache = {}
tuning_options["scaling"] = False
bounds = get_bounds(tuning_options.tune_params)
args = (kernel_options, tuning_options, runner, results, cache)
opt_result = differential_evolution(_cost_func, bounds, args, maxiter=1,
polish=False, disp=tuning_options.verbose)
if tuning_options.verbose:
print(opt_result.message)
return results, runner.dev.get_environment() | Find the best performing kernel configuration in the parameter space
:params runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: kernel_tuner.interface.Options
:param device_options: A dictionary with all options for the device
on which the kernel should be tuned.
:type device_options: kernel_tuner.interface.Options
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: kernel_tuner.interface.Options
:returns: A list of dictionaries for executed kernel configurations and their
execution times. And a dictionary that contains a information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict() |
5,335 | def decode_event(self, log_topics, log_data):
if not len(log_topics) or log_topics[0] not in self.event_data:
raise ValueError()
event_id_ = log_topics[0]
event = self.event_data[event_id_]
unindexed_types = [
type_
for type_, indexed in zip(event[], event[])
if not indexed
]
unindexed_args = decode_abi(unindexed_types, log_data)
indexed_count = 1
result = {}
for name, type_, indexed in zip(
event[], event[], event[]):
if indexed:
topic_bytes = utils.zpad(
utils.encode_int(log_topics[indexed_count]),
32,
)
indexed_count += 1
value = decode_single(process_type(type_), topic_bytes)
else:
value = unindexed_args.pop(0)
result[name] = value
result[] = utils.to_string(event[])
return result | Return a dictionary representation of the log.
Note:
This function won't work with anonymous events.
Args:
log_topics (List[bin]): The log's indexed arguments.
log_data (bin): The encoded non-indexed arguments. |
5,336 | def _get_uploaded_file(session, file_info, fragment_count=0):
try:
return session.query(UploadedFile).filter(UploadedFile.sha1 == file_info.sha1).one()
except NoResultFound:
new_instance = UploadedFile(
sha1=file_info.sha1,
file_name=file_info.upath,
fragment_count=fragment_count
)
session.add(new_instance)
return new_instance | :param session: locked session (with self._session_resource as >> session <<)
:param file_info: contains file information to save or query
:param fragment_count: amount of fragments associated to the file
:return: an UploadedFile associated to the file_info |
5,337 | def entry_line_to_text(self, entry):
line = []
if not entry._text:
flags_text = self.flags_to_text(entry.flags)
duration_text = self.duration_to_text(entry.duration)
return .join(
(flags_text, if flags_text else , entry.alias, , duration_text, , entry.description)
)
for i, text in enumerate(entry._text):
if i in self.ENTRY_ATTRS_POSITION:
if self.ENTRY_ATTRS_POSITION[i] in entry._changed_attrs:
attr_name = self.ENTRY_ATTRS_POSITION[i]
attr_value = getattr(entry, self.ENTRY_ATTRS_POSITION[i])
if attr_name in self.ENTRY_ATTRS_TRANSFORMERS:
attr_value = getattr(self, self.ENTRY_ATTRS_TRANSFORMERS[attr_name])(attr_value)
else:
attr_value = text
line.append(attr_value)
else:
if len(line[i-1]) != len(entry._text[i-1]):
text = * max(1, (len(text) - (len(line[i-1]) - len(entry._text[i-1]))))
line.append(text)
return .join(line).strip() | Return the textual representation of an :class:`~taxi.timesheet.lines.Entry` instance. This method is a bit
convoluted since we don't want to completely mess up the original formatting of the entry. |
5,338 | def from_bytes(cls, bitstream):
packet = cls()
if not isinstance(bitstream, ConstBitStream):
if isinstance(bitstream, Bits):
bitstream = ConstBitStream(auto=bitstream)
else:
bitstream = ConstBitStream(bytes=bitstream)
(packet.source_port,
packet.destination_port) = bitstream.readlist()
length = bitstream.read()
if length < 8:
raise ValueError()
packet.checksum = bitstream.read()
payload_bytes = length - 8
packet.payload = bitstream.read( % payload_bytes)
if packet.source_port == 4341 or packet.destination_port == 4341:
from pylisp.packet.lisp.data import DataPacket
packet.payload = DataPacket.from_bytes(packet.payload)
elif packet.source_port == 4342 or packet.destination_port == 4342:
from pylisp.packet.lisp.control.base import ControlMessage
packet.payload = ControlMessage.from_bytes(packet.payload)
if bitstream.pos != bitstream.len:
raise ValueError()
packet.sanitize()
return packet | Parse the given packet and update properties accordingly |
5,339 | def _dens(self,R,z,phi=0.,t=0.):
r2= R**2+z**2
if r2 != self.a2:
return 0.
else:
return nu.infty | NAME:
_dens
PURPOSE:
evaluate the density for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the surface density
HISTORY:
2018-08-19 - Written - Bovy (UofT) |
5,340 | def create_async_sns_topic(self, lambda_name, lambda_arn):
topic_name = get_topic_name(lambda_name)
topic_arn = self.sns_client.create_topic(
Name=topic_name)[]
self.sns_client.subscribe(
TopicArn=topic_arn,
Protocol=,
Endpoint=lambda_arn
)
self.create_event_permission(
lambda_name=lambda_name,
principal=,
source_arn=topic_arn
)
add_event_source(
event_source={
"arn": topic_arn,
"events": ["sns:Publish"]
},
lambda_arn=lambda_arn,
target_function="zappa.asynchronous.route_task",
boto_session=self.boto_session
)
return topic_arn | Create the SNS-based async topic. |
5,341 | def problem_with_codon(codon_index, codon_list, bad_seqs):
base_1 = 3 * codon_index
base_3 = 3 * codon_index + 2
gene_seq = "".join(codon_list)
for bad_seq in bad_seqs:
problem = bad_seq.search(gene_seq)
if problem and problem.start() < base_3 and problem.end() > base_1:
return True
return False | Return true if the given codon overlaps with a bad sequence. |
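A self-contained check of the overlap test; the bad sequences are pre-compiled regexes, and the example gene and motif are made up. The `"".join` reflects codons being concatenated without a separator.

```python
import re

def problem_with_codon(codon_index, codon_list, bad_seqs):
    """Return True if any bad-sequence match overlaps the codon's three bases."""
    base_1 = 3 * codon_index
    base_3 = 3 * codon_index + 2
    gene_seq = "".join(codon_list)
    for bad_seq in bad_seqs:
        problem = bad_seq.search(gene_seq)
        if problem and problem.start() < base_3 and problem.end() > base_1:
            return True
    return False

codons = ["ATG", "GAA", "TTC"]               # gene sequence: ATGGAATTC
bad = [re.compile("GAATTC")]                 # EcoRI site, matching positions 3-9
print(problem_with_codon(0, codons, bad))    # False: match starts after codon 0 ends
print(problem_with_codon(1, codons, bad))    # True: match overlaps codon 1
```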
5,342 | def thin_sum(cachedir, form=):
thintar = gen_thin(cachedir)
code_checksum_path = os.path.join(cachedir, , )
if os.path.isfile(code_checksum_path):
with salt.utils.files.fopen(code_checksum_path, ) as fh:
code_checksum = "".format(fh.read().strip())
else:
code_checksum = ""
return code_checksum, salt.utils.hashutils.get_hash(thintar, form) | Return the checksum of the current thin tarball |
5,343 | def rate_limit_status(self):
return bind_api(
api=self,
path=,
payload_type=,
allowed_param=[],
use_cache=False
) | :reference: https://developer.twitter.com/en/docs/developer-utilities/rate-limit-status/api-reference/get-application-rate_limit_status
:allowed_param:'resources' |
5,344 | def get_views(self, item=None, key=None, option=None):
if item is None:
item_views = self.views
else:
item_views = self.views[item]
if key is None:
return item_views
else:
if option is None:
return item_views[key]
else:
if option in item_views[key]:
return item_views[key][option]
else:
return | Return the views object.
If key is None, return all the views for the current plugin;
else if option is None, return the view for the specific key (all options);
else return the view for the specific key/option.
Specify item if the stats are stored in a dict of dict (ex: NETWORK, FS...) |
5,345 | def push(self, cf):
cf.next = self
if self.state is not None:
self.state.register_plugin(, cf)
self.state.history.recent_stack_actions.append(CallStackAction(
hash(cf), len(cf), , callframe=cf.copy({}, with_tail=False)
))
return cf | Push the frame cf onto the stack. Return the new stack. |
5,346 | def infer_x(self, y):
OptimizedInverseModel.infer_x(self, y)
if self.fmodel.size() == 0:
return self._random_x()
x_guesses = [self._guess_x_simple(y)[0]]
result = []
for xg in x_guesses:
res = cma.fmin(self._error, xg, self.cmaes_sigma,
options={:[self.lower, self.upper],
:0,
:False,
:self.maxfevals,
: self.seed})
result.append((res[1], res[0]))
return [xi for fi, xi in sorted(result)] | Infer probable x from input y
@param y the desired output for the inferred x.
@return a list of probable x |
5,347 | def _from_json_array_nested(cls, response_raw):
json = response_raw.body_bytes.decode()
obj = converter.json_to_class(dict, json)
value = converter.deserialize(cls, obj[cls._FIELD_RESPONSE])
return client.BunqResponse(value, response_raw.headers) | :type response_raw: client.BunqResponseRaw
:rtype: bunq.sdk.client.BunqResponse[cls] |
5,348 | def do_help(self, arg):
if not arg:
Cmd.do_help(self, arg)
elif arg in (, ):
print(" Help! I need somebody...")
print(" Help! Not just anybody...")
print(" Help! You know, I need someone...")
print(" Heeelp!")
else:
if arg == :
commands = self.get_names()
commands = [ x for x in commands if x.startswith() ]
else:
commands = set()
for x in arg.split():
x = x.strip()
if x:
for n in self.completenames(x):
commands.add( % n )
commands = list(commands)
commands.sort()
print(self.get_help(commands)) | ? - show the list of available commands
? * - show help for all commands
? <command> [command...] - show help for the given command(s)
help - show the list of available commands
help * - show help for all commands
help <command> [command...] - show help for the given command(s) |
5,349 | def add_tracked_motors(self, tracked_motors):
new_mockup_motors = map(self.get_mockup_motor, tracked_motors)
self.tracked_motors = list(set(self.tracked_motors + new_mockup_motors)) | Add new motors to the recording |
5,350 | def state(anon, obj, field, val):
return anon.faker.state(field=field) | Returns a randomly selected US state code |
5,351 | def within_n_mads(n, series):
mad_score = (series - series.mean()) / series.mad()
return (mad_score.abs() <= n).all() | Return true if all values in sequence are within n MADs |
5,352 | def refresh(self, force_cache=False):
if self.check_if_ok_to_update() or force_cache:
for sync_name, sync_module in self.sync.items():
_LOGGER.debug("Attempting refresh of sync %s", sync_name)
sync_module.refresh(force_cache=force_cache)
if not force_cache:
self.last_refresh = int(time.time())
return True
return False | Perform a system refresh.
:param force_cache: Force an update of the camera cache |
5,353 | def _dataframe_fields(self):
fields_to_include = {
: self.assist_percentage,
: self.assists,
: self.block_percentage,
: self.blocks,
: self.box_plus_minus,
: self.conference,
: self.defensive_box_plus_minus,
: self.defensive_rebound_percentage,
: self.defensive_rebounds,
: self.defensive_win_shares,
:
self.effective_field_goal_percentage,
: self.field_goal_attempts,
: self.field_goal_percentage,
: self.field_goals,
: self.free_throw_attempt_rate,
: self.free_throw_attempts,
: self.free_throw_percentage,
: self.free_throws,
: self.games_played,
: self.games_started,
: self.height,
: self.minutes_played,
: self.offensive_box_plus_minus,
: self.offensive_rebound_percentage,
: self.offensive_rebounds,
: self.offensive_win_shares,
: self.personal_fouls,
: self.player_efficiency_rating,
: self.player_id,
: self.points,
: self.points_produced,
: self.position,
: self.steal_percentage,
: self.steals,
: self.team_abbreviation,
: self.three_point_attempt_rate,
: self.three_point_attempts,
: self.three_point_percentage,
: self.three_pointers,
: self.total_rebound_percentage,
: self.total_rebounds,
: self.true_shooting_percentage,
: self.turnover_percentage,
: self.turnovers,
: self.two_point_attempts,
: self.two_point_percentage,
: self.two_pointers,
: self.usage_percentage,
: self.weight,
: self.win_shares,
: self.win_shares_per_40_minutes,
}
return fields_to_include | Creates a dictionary of all fields to include with DataFrame.
Because the values returned by the class properties change with the class index value, the dictionary should be regenerated every time the index changes and the dataframe property is requested.
Returns
-------
dictionary
Returns a dictionary where the keys are the shortened ``string``
attribute names and the values are the actual value for each
attribute for the specified index. |
5,354 | def words(content, filter=True, predicate=None):
def accept_word(word):
return len(word) > 2 \
and word.lower() not in stop_words \
and not _UNWANTED_WORDS_PATTERN.match(word)
words = _tokenize(content)
if filter or predicate:
if not predicate:
predicate = accept_word
return (w for w in words if predicate(w))
return words | \
Returns an iterable of words from the provided text.
`content`
A text.
`filter`
Indicates if stop words and garbage like "xxxxxx" should be removed from
the word list.
`predicate`
An alternative word filter. If it is ``None``, strings like "xxxx" and "---",
default stop words, and words shorter than 3 characters are filtered
(iff ``filter`` is set to ``True``).
>>> list(words('Hello and goodbye ------ '))
['Hello', 'goodbye']
>>> list(words('Hello, and goodbye ------ Subject xxxxxxxxx XXXXXXXXXXXX here'))
['Hello', 'goodbye', 'Subject']
>>> list(words('Hello, and goodbye.How are you?'))
['Hello', 'goodbye'] |
5,355 | def add_github_hook_options(parser):
cookbook = parser.add_parser(, help=
)
cookbook.add_argument(, action=,
help=)
cookbook.add_argument(, action=,
help=)
domain = socket.gethostname()
example = % domain
cookbook.add_argument(, action=,
help= % example)
cookbook.add_argument(, ,
action=,
dest=,
default=github.GITHUB_HOST,
help=
)
cookbook.add_argument(, ,
action=,
dest=,
help=
)
cookbook.set_defaults(func=) | Add the github jenkins hook command and arguments.
:rtype: argparse.ArgumentParser |
5,356 | def fit_first_and_second_harmonics(phi, intensities):
a1 = b1 = a2 = b2 = 1.
def optimize_func(x):
return first_and_second_harmonic_function(
phi, np.array([x[0], x[1], x[2], x[3], x[4]])) - intensities
return _least_squares_fit(optimize_func, [np.mean(intensities), a1, b1,
a2, b2]) | Fit the first and second harmonic function values to a set of
(angle, intensity) pairs.
This function is used to compute corrections for ellipse fitting:
.. math::
f(phi) = y0 + a1*\\sin(phi) + b1*\\cos(phi) + a2*\\sin(2*phi) +
b2*\\cos(2*phi)
Parameters
----------
phi : float or `~numpy.ndarray`
The angle(s) along the elliptical path, going towards the positive
y axis, starting coincident with the position angle. That is, the
angles are defined from the semimajor axis that lies in
the positive x quadrant.
intensities : `~numpy.ndarray`
The intensities measured along the elliptical path, at the
angles defined by the ``phi`` parameter.
Returns
-------
y0, a1, b1, a2, b2 : float
The fitted harmonic coefficient values. |
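Because the model is linear in its five coefficients, a design-matrix least-squares fit recovers them directly; the iterative `_least_squares_fit` helper used above is replaced by `numpy.linalg.lstsq` in this sketch, and the data are synthetic.

```python
import numpy as np

def harmonic_design_matrix(phi):
    """Columns: 1, sin(phi), cos(phi), sin(2*phi), cos(2*phi)."""
    return np.column_stack([np.ones_like(phi), np.sin(phi), np.cos(phi),
                            np.sin(2 * phi), np.cos(2 * phi)])

phi = np.linspace(0, 2 * np.pi, 50, endpoint=False)
true_coeffs = np.array([10.0, 0.5, -0.3, 0.2, 0.1])      # y0, a1, b1, a2, b2
intensities = harmonic_design_matrix(phi) @ true_coeffs

coeffs, *_ = np.linalg.lstsq(harmonic_design_matrix(phi), intensities, rcond=None)
print(np.round(coeffs, 3))   # [10.    0.5  -0.3   0.2   0.1]
```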
5,357 | def Update(self, menu=None, tooltip=None,filename=None, data=None, data_base64=None,):
if menu is not None:
self.Menu = menu
qmenu = QMenu()
qmenu.setTitle(self.Menu[0])
AddTrayMenuItem(qmenu, self.Menu[1], self)
self.TrayIcon.setContextMenu(qmenu)
if tooltip is not None:
self.TrayIcon.setToolTip(str(tooltip))
qicon = None
if filename is not None:
qicon = QIcon(filename)
elif data is not None:
ba = QtCore.QByteArray.fromRawData(data)
pixmap = QtGui.QPixmap()
pixmap.loadFromData(ba)
qicon = QIcon(pixmap)
elif data_base64 is not None:
ba = QtCore.QByteArray.fromBase64(data_base64)
pixmap = QtGui.QPixmap()
pixmap.loadFromData(ba)
qicon = QIcon(pixmap)
if qicon is not None:
self.TrayIcon.setIcon(qicon) | Updates the menu, tooltip or icon
:param menu: menu definition
:param tooltip: string representing tooltip
:param filename: icon filename
:param data: icon raw image
:param data_base64: icon base 64 image
:return: |
5,358 | def normalize_enum_constant(s):
if s.islower(): return s
if s.isupper(): return s.lower()
return "".join(ch if ch.islower() else "_" + ch.lower() for ch in s).strip("_") | Return enum constant `s` converted to a canonical snake-case. |
5,359 | def check_valid_cpc_status(method, uri, cpc):
status = cpc.properties.get(, None)
if status is None:
return
valid_statuses = [, , , ]
if status not in valid_statuses:
if uri.startswith(cpc.uri):
raise ConflictError(method, uri, reason=1,
message="The operation cannot be performed "
"because the targeted CPC {} has a status "
"that is not valid for the operation: {}".
format(cpc.name, status))
else:
raise ConflictError(method, uri, reason=6,
message="The operation cannot be performed "
"because CPC {} hosting the targeted resource "
"has a status that is not valid for the "
"operation: {}".
format(cpc.name, status)) | Check that the CPC is in a valid status, as indicated by its 'status'
property.
If the Cpc object does not have a 'status' property set, this function does
nothing (in order to make the mock support easy to use).
Raises:
ConflictError with reason 1: The CPC itself has been targeted by the
operation.
ConflictError with reason 6: The CPC is hosting the resource targeted by
the operation. |
5,360 | def pretty_print_json(self, json_string):
return json.dumps(self.string_to_json(json_string), indent=2, ensure_ascii=False) | Return formatted JSON string _json_string_.\n
Using method json.dumps with settings: _indent=2, ensure_ascii=False_.
*Args:*\n
_json_string_ - JSON string.
*Returns:*\n
Formatted JSON string.
*Example:*\n
| *Settings* | *Value* |
| Library | JsonValidator |
| Library | OperatingSystem |
| *Test Cases* | *Action* | *Argument* | *Argument* |
| Check element | ${pretty_json}= | Pretty print json | {a:1,foo:[{b:2,c:3},{d:"baz",e:4}]} |
| | Log | ${pretty_json} |
=>\n
| {
| "a": 1,
| "foo": [
| {
| "c": 3,
| "b": 2
| },
| {
| "e": 4,
| "d": "baz"
| }
| ]
| } |
5,361 | def exclude_by_ends(in_file, exclude_file, data, in_params=None):
params = {"end_buffer": 50,
"rpt_pct": 0.9,
"total_rpt_pct": 0.2,
"sv_pct": 0.5}
if in_params:
params.update(in_params)
assert in_file.endswith(".bed")
out_file = "%s-norepeats%s" % utils.splitext_plus(in_file)
to_filter = collections.defaultdict(list)
removed = 0
if not utils.file_uptodate(out_file, in_file):
with file_transaction(data, out_file) as tx_out_file:
with shared.bedtools_tmpdir(data):
for coord, end_name in [(1, "end1"), (2, "end2")]:
base, ext = utils.splitext_plus(tx_out_file)
end_file = _create_end_file(in_file, coord, params, "%s-%s%s" % (base, end_name, ext))
to_filter = _find_to_filter(end_file, exclude_file, params, to_filter)
with open(tx_out_file, "w") as out_handle:
with open(in_file) as in_handle:
for line in in_handle:
key = "%s:%s-%s" % tuple(line.strip().split("\t")[:3])
total_rpt_size = sum(to_filter.get(key, [0]))
if total_rpt_size <= (params["total_rpt_pct"] * params["end_buffer"]):
out_handle.write(line)
else:
removed += 1
return out_file, removed | Exclude calls based on overlap of the ends with exclusion regions.
Removes structural variants with either end being in a repeat: a large
source of false positives.
Parameters tuned based on removal of LCR overlapping false positives in DREAM
synthetic 3 data. |
5,362 | def copy_and_disconnect_tree(root, machine):
new_root = None
new_lookup = {}
broken_links = set()
to_visit = deque([(None, None, root)])
while to_visit:
new_parent, direction, old_node = to_visit.popleft()
if old_node.chip in machine:
new_node = RoutingTree(old_node.chip)
new_lookup[new_node.chip] = new_node
else:
assert new_parent is not None, \
"Net cannot be sourced from a dead chip."
new_node = new_parent
if new_parent is None:
new_root = new_node
elif new_node is not new_parent:
if direction in links_between(new_parent.chip,
new_node.chip,
machine):
new_parent.children.append((direction, new_node))
else:
broken_links.add((new_parent.chip, new_node.chip))
for child_direction, child in old_node.children:
to_visit.append((new_node, child_direction, child))
return (new_root, new_lookup, broken_links) | Copy a RoutingTree (containing nothing but RoutingTrees), disconnecting
nodes which are not connected in the machine.
Note that if a dead chip is part of the input RoutingTree, no corresponding
node will be included in the copy. The assumption behind this is that the
only reason a tree would visit a dead chip is because a route passed
through the chip and wasn't actually destined to arrive at that chip. This
situation is impossible to confirm since the input routing trees have not
yet been populated with vertices. The caller is responsible for being
sensible.
Parameters
----------
root : :py:class:`~rig.place_and_route.routing_tree.RoutingTree`
The root of the RoutingTree that contains nothing but RoutingTrees
(i.e. no children which are vertices or links).
machine : :py:class:`~rig.place_and_route.Machine`
The machine in which the routes exist.
Returns
-------
(root, lookup, broken_links)
Where:
* `root` is the new root of the tree
:py:class:`~rig.place_and_route.routing_tree.RoutingTree`
* `lookup` is a dict {(x, y):
:py:class:`~rig.place_and_route.routing_tree.RoutingTree`, ...}
* `broken_links` is a set ([(parent, child), ...]) containing all
disconnected parent and child (x, y) pairs due to broken links. |
5,363 | def is_binary(path):
if not os.path.isfile(path):
return False
try:
with fopen(path, 'rb') as fp_:
try:
data = fp_.read(2048)
if six.PY3:
data = data.decode(__salt_system_encoding__)
return salt.utils.stringutils.is_binary(data)
except UnicodeDecodeError:
return True
except os.error:
return False | Detects if the file is a binary, returns bool. Returns True if the file is
a bin, False if the file is not and None if the file is not available. |
5,364 | def format_item(x, timedelta_format=None, quote_strings=True):
if isinstance(x, (np.datetime64, datetime)):
return format_timestamp(x)
if isinstance(x, (np.timedelta64, timedelta)):
return format_timedelta(x, timedelta_format=timedelta_format)
elif isinstance(x, (str, bytes)):
return repr(x) if quote_strings else x
elif isinstance(x, (float, np.float)):
return .format(x)
else:
return str(x) | Returns a succinct summary of an object as a string |
5,365 | def close(self):
try:
try:
self.connection.quit()
except socket.sslerror:
self.connection.close()
except Exception as e:
logger.error(
"Error trying to close connection to server " "%s:%s: %s",
self.host,
self.port,
e,
)
if self.fail_silently:
return
raise
finally:
self.connection = None | Close the connection to the email server. |
5,366 | def metadata_path(self, m_path):
if not m_path:
self.metadata_dir = None
self.metadata_file = None
else:
if not op.exists(m_path):
raise OSError(.format(m_path))
if not op.dirname(m_path):
self.metadata_dir = '.'  # reconstructed: fall back to the current directory when no dirname is given
else:
self.metadata_dir = op.dirname(m_path)
self.metadata_file = op.basename(m_path)
self.update(parse_kegg_gene_metadata(self.metadata_path), overwrite=True) | Provide pointers to the paths of the metadata file
Args:
m_path: Path to metadata file |
5,367 | def structured_partlist(input, timeout=20, showgui=False):
s = raw_partlist(input=input, timeout=timeout, showgui=showgui)
return parse_partlist(s) | export partlist by eagle, then parse it
:param input: .sch or .brd file name
:param timeout: int
:param showgui: Bool, True -> do not hide eagle GUI
:rtype: tuple of header list and dict list: (['part','value',..], [{'part':'C1', 'value':'1n'}, ..]) |
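The parsing half can be sketched independently of Eagle. Assuming the exported partlist is a header line followed by whitespace-separated rows (the real export is column-aligned, so this is a simplification), a parse_partlist-style helper might look like this:
def parse_partlist_text(text):
    # Naive sketch: first non-empty line is the header, remaining lines are
    # whitespace-separated values.
    lines = [ln for ln in text.splitlines() if ln.strip()]
    header = lines[0].lower().split()
    rows = []
    for ln in lines[1:]:
        values = ln.split(None, len(header) - 1)
        rows.append(dict(zip(header, values)))
    return header, rows

header, rows = parse_partlist_text("Part Value\nC1 1n\nR1 10k\n")
# header -> ['part', 'value'];  rows[0] -> {'part': 'C1', 'value': '1n'}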
5,368 | def parse_universe_description(self, description):
self.raw_description = description
description = description.split(',')  # separator reconstructed from the 'exchange,n' grammar below
self.exchange = description[0]
n = int(description[1]) if len(description) == 2 else -1
self.sids = self._lookup_sids(description[0], n) | Semantic
- 'sid1,sid2,sid2,...'
- 'exchange' : every sids of the exchange
- 'exchange,n' : n random sids of the exchange
where exchange is a combination of 'type:index:submarket' |
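A standalone sketch of the same description grammar, with the sid lookup left as a caller-supplied function (`lookup_sids` here is hypothetical; the real class uses its own `_lookup_sids`). As in the body above, the comma form is interpreted as 'exchange,n' rather than an explicit sid list.
def parse_universe(description, lookup_sids):
    # 'nasdaq'    -> every sid of the exchange
    # 'nasdaq,20' -> 20 random sids of the exchange
    # lookup_sids(exchange, n) is assumed to return the sid list (-1 = all).
    parts = description.split(',')
    exchange = parts[0]
    n = int(parts[1]) if len(parts) == 2 else -1
    return exchange, lookup_sids(exchange, n)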
5,369 | def find_matching_bracket_position(self, start_pos=None, end_pos=None):
for A, B in '()', '[]', '{}', '<>':  # bracket pairs reconstructed; each 2-char string unpacks into A, B
if self.current_char == A:
return self.find_enclosing_bracket_right(A, B, end_pos=end_pos) or 0
elif self.current_char == B:
return self.find_enclosing_bracket_left(A, B, start_pos=start_pos) or 0
return 0 | Return relative cursor position of matching [, (, { or < bracket.
When `start_pos` or `end_pos` are given. Don't look past the positions. |
5,370 | def get_data_record(self, brain, field_names):
record = {}
model = None
for field_name in field_names:
value = getattr(brain, field_name, None)
if value is None:
logger.warn("Not a metadata field: {}".format(field_name))
model = model or SuperModel(brain)
value = model.get(field_name, None)
if callable(value):
value = value()
record[field_name] = value or " "
return record | Returns a dict with the column values for the given brain |
5,371 | def get_node_label(self, model):
if model.is_proxy:
label = "(P) %s" % (model.name.title())
else:
label = "%s" % (model.name.title())
line = ""
new_label = []
for w in label.split(" "):
if len(line + w) > 15:
new_label.append(line)
line = w
else:
line += " "
line += w
new_label.append(line)
return "\n".join(new_label) | Defines how labels are constructed from models.
Default - uses verbose name, with line breaks where sensible |
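The greedy 15-character wrapping can be approximated with the standard library's textwrap module; results may differ slightly at line boundaries since the loop above measures the line before appending the separator.
import textwrap

def wrap_label(label, width=15):
    # Greedy word wrap, roughly equivalent to the manual loop above.
    return "\n".join(textwrap.wrap(label, width=width)) or label

print(wrap_label("(P) Very Long Proxy Model Name"))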
5,372 | def recreate_article_body(self):
for foreign_id, body in iteritems(self.record_keeper.article_bodies):
try:
local_page_id = self.record_keeper.get_local_page(foreign_id)
page = Page.objects.get(id=local_page_id).specific
new_body = []
for item in body:
if not item['value']:
continue
if item['type'] == 'page':
new_page_id = self.record_keeper.get_local_page(
item['value'])
item['value'] = new_page_id
elif item['type'] == 'image':
new_image_id = self.record_keeper.get_local_image(
item['value'])
item['value'] = new_image_id
new_body.append(item)
setattr(page, 'body', json.dumps(new_body))
page.save_revision().publish()
except Exception as e:
self.log(ERROR, "recreating article body",
{
"exception": e,
"foreign_id": foreign_id,
"body": body,
},
depth=1) | Handles case where article body contained page or image.
Assumes all articles and images have been created. |
5,373 | def get_perceel_by_capakey(self, capakey):
def creator():
url = self.base_url + % capakey
h = self.base_headers
p = {
: ,
: ,
:
}
res = capakey_rest_gateway_request(url, h, p).json()
return Perceel(
res[],
Sectie(
res[],
Afdeling(
res[],
res[],
Gemeente(res[], res[])
)
),
res[],
Perceel.get_percid_from_capakey(res[]),
None,
None,
self._parse_centroid(res[][]),
self._parse_bounding_box(res[][]),
res[][]
)
if self.caches[].is_configured:
key = % capakey
perceel = self.caches[].get_or_create(key, creator)
else:
perceel = creator()
perceel.set_gateway(self)
return perceel | Get a `perceel`.
:param capakey: An capakey for a `perceel`.
:rtype: :class:`Perceel` |
5,374 | def safe_size(source):
if source is None:
return None
total_bytes = 0
bytes = []
b = source.read(MIN_READ_SIZE)
while b:
total_bytes += len(b)
bytes.append(b)
if total_bytes > MAX_STRING_SIZE:
try:
data = FileString(TemporaryFile())
for bb in bytes:
data.write(bb)
del bytes
del bb
b = source.read(MIN_READ_SIZE)
while b:
total_bytes += len(b)
data.write(b)
b = source.read(MIN_READ_SIZE)
data.seek(0)
Log.note("Using file of size {{length}} instead of str()", length= total_bytes)
return data
except Exception as e:
Log.error("Could not write file > {{num}} bytes", num= total_bytes, cause=e)
b = source.read(MIN_READ_SIZE)
data = b"".join(bytes)
del bytes
return data | READ THE source UP TO SOME LIMIT, THEN COPY TO A FILE IF TOO BIG
RETURN A str() OR A FileString() |
5,375 | def get_config():
*
profiles = {}
curr = None
cmd = ['netsh', 'advfirewall', 'show', 'allprofiles']
ret = __salt__['cmd.run_all'](cmd, python_shell=False, ignore_retcode=True)
if ret['retcode'] != 0:
raise CommandExecutionError(ret['stdout'])
curr = None
return profiles | Get the status of all the firewall profiles
Returns:
dict: A dictionary of all profiles on the system
Raises:
CommandExecutionError: If the command fails
CLI Example:
.. code-block:: bash
salt '*' firewall.get_config |
5,376 | def get_git_postversion(addon_dir):
addon_dir = os.path.realpath(addon_dir)
last_version = read_manifest(addon_dir).get('version', '')
last_version_parsed = parse_version(last_version)
if not is_git_controlled(addon_dir):
return last_version
if get_git_uncommitted(addon_dir):
uncommitted = True
count = 1
else:
uncommitted = False
count = 0
last_sha = None
git_root = get_git_root(addon_dir)
for sha in git_log_iterator(addon_dir):
try:
manifest = read_manifest_from_sha(sha, addon_dir, git_root)
except NoManifestFound:
break
version = manifest.get('version', '')
version_parsed = parse_version(version)
if version_parsed != last_version_parsed:
break
if last_sha is None:
last_sha = sha
else:
count += 1
if not count:
return last_version
if last_sha:
return last_version + ".99.dev%s" % count
if uncommitted:
return last_version + ".dev1"
return last_version | return the addon version number, with a developmental version increment
if there were git commits in the addon_dir after the last version change.
If the last change to the addon correspond to the version number in the
manifest it is used as is for the python package version. Otherwise a
counter is incremented for each commit and resulting version number has
the following form: [8|9].0.x.y.z.1devN, N being the number of git
commits since the version change.
Note: we use .99.devN because:
* pip ignores .postN by design (https://github.com/pypa/pip/issues/2872)
* x.y.z.devN is anterior to x.y.z
Note: we don't put the sha1 of the commit in the version number because
this is not PEP 440 compliant and is therefore misinterpreted by pip. |
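The suffix scheme itself can be shown in isolation. The sketch below only reproduces the version-string construction described in the docstring; the git walking and the exact counting of the first matching commit are simplified away, so the numbers are illustrative.
def postversion(last_version, commits_since_change, uncommitted):
    # Build the developmental suffix from a commit count and a dirty-tree flag.
    count = commits_since_change + (1 if uncommitted else 0)
    if not count:
        return last_version                       # manifest version is current
    if commits_since_change:
        return last_version + ".99.dev%s" % count
    return last_version + ".dev1"                 # only uncommitted changes

print(postversion("10.0.1.2.0", 3, False))   # -> 10.0.1.2.0.99.dev3
print(postversion("10.0.1.2.0", 0, True))    # -> 10.0.1.2.0.dev1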
5,377 | def callback(self, timestamp, event_type, payload):
try:
data = (event_type, payload)
LOG.debug(
, (
{: event_type, : payload}))
if 'create' in event_type:
pri = self._create_pri
elif 'delete' in event_type:
pri = self._delete_pri
elif 'update' in event_type:
pri = self._update_pri
else:
pri = self._delete_pri
self._pq.put((pri, timestamp, data))
except Exception as exc:
LOG.exception(,
{: str(exc), : event_type}) | Callback method for processing events in notification queue.
:param timestamp: time the message is received.
:param event_type: event type in the notification queue such as
identity.project.created, identity.project.deleted.
:param payload: Contains information of an event |
5,378 | def open_like(a, path, **kwargs):
_like_args(a, kwargs)
if isinstance(a, Array):
kwargs.setdefault('fill_value', a.fill_value)
return open_array(path, **kwargs) | Open a persistent array like `a`. |
5,379 | def get_additional_properties(self, _type, *args, **kwargs):
if not SchemaObjects.contains(_type):
return _type
schema = SchemaObjects.get(_type)
body = []
for sch in schema.nested_schemas:
nested_schema = SchemaObjects.get(sch)
if not (nested_schema or isinstance(nested_schema, SchemaMapWrapper)):
continue
body.append(.format(self.get_type_description(
nested_schema.schema_id, *args, **kwargs))
)
if nested_schema.is_array:
_schema = SchemaObjects.get(nested_schema.item.get())
if _schema and _schema.schema_type == SchemaTypes.INLINE:
body.append(self.get_regular_properties(_schema.schema_id, *args, **kwargs))
else:
body.append(self.get_regular_properties(nested_schema.schema_id, *args, **kwargs))
if schema.type_format:
body.append(
.format(self.get_type_description(schema.type_format, *args, **kwargs)))
return .join(body) | Make head and table with additional properties by schema_id
:param str _type:
:rtype: str |
5,380 | def detail_search(self, params, standardize=False):
response = self._request(ENDPOINTS[], params)
result_data = []
for person in response[]:
try:
detail = self.person_details(person[],
standardize=standardize)
except ValueError:
pass
else:
result_data.append(detail)
response[] = result_data
return response | Get a detailed list of person objects for the given search params.
:param params:
Dictionary specifying the query parameters
>>> people_detailed = d.detail_search({'first_name': 'tobias', 'last_name': 'funke'}) |
5,381 | def dehydrate(self):
result = {}
for attr in self.attrs:
result[attr] = getattr(self, attr)
return result | Return a dict representing this bucket. |
5,382 | def heartbeat(request):
all_checks = checks.registry.registry.get_checks(
include_deployment_checks=not settings.DEBUG,
)
details = {}
statuses = {}
level = 0
for check in all_checks:
detail = heartbeat_check_detail(check)
statuses[check.__name__] = detail['status']
level = max(level, detail['level'])
if detail['level'] > 0:
details[check.__name__] = detail
if level < checks.messages.WARNING:
status_code = 200
heartbeat_passed.send(sender=heartbeat, level=level)
else:
status_code = 500
heartbeat_failed.send(sender=heartbeat, level=level)
payload = {
'status': level_to_text(level),
'checks': statuses,
'details': details,
}
return JsonResponse(payload, status=status_code) | Runs all the Django checks and returns a JsonResponse with either
a status code of 200 or 500 depending on the results of the checks.
Any check that returns a warning or worse (error, critical) will
return a 500 response. |
5,383 | def lbol_from_spt_dist_mag (sptnum, dist_pc, jmag, kmag, format=):
bcj = bcj_from_spt (sptnum)
bck = bck_from_spt (sptnum)
n = np.zeros (sptnum.shape, dtype=np.int)
app_mbol = np.zeros (sptnum.shape)
w = np.isfinite (bcj) & np.isfinite (jmag)
app_mbol[w] += jmag[w] + bcj[w]
n[w] += 1
w = np.isfinite (bck) & np.isfinite (kmag)
app_mbol[w] += kmag[w] + bck[w]
n[w] += 1
w = (n != 0)
abs_mbol = (app_mbol[w] / n[w]) - 5 * (np.log10 (dist_pc[w]) - 1)
lbol = np.empty (sptnum.shape)
lbol.fill (np.nan)
lbol[w] = lbol_from_mbol (abs_mbol, format=format)
return lbol | Estimate a UCD's bolometric luminosity given some basic parameters.
sptnum: the spectral type as a number; 8 -> M8; 10 -> L0 ; 20 -> T0
Valid values range between 0 and 30, ie M0 to Y0.
dist_pc: distance to the object in parsecs
jmag: object's J-band magnitude or NaN (*not* None) if unavailable
kmag: same with K-band magnitude
format: either 'cgs', 'logcgs', or 'logsun', defining the form of the
outputs. Logarithmic quantities are base 10.
This routine can be used with vectors of measurements. The result will be
NaN if a value cannot be computed. This routine implements the method
documented in the Appendix of Williams et al., 2014ApJ...785....9W
(doi:10.1088/0004-637X/785/1/9). |
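The core arithmetic is the distance modulus applied to an averaged apparent bolometric magnitude. A worked example with made-up numbers: the bolometric corrections below are placeholders rather than values from bcj_from_spt/bck_from_spt, and 4.74 is the conventional solar bolometric magnitude.
import numpy as np

jmag, kmag = 14.2, 13.1       # apparent magnitudes (illustrative)
bcj, bck = 1.9, 3.2           # placeholder bolometric corrections
dist_pc = 12.0

app_mbol = np.mean([jmag + bcj, kmag + bck])        # average over available bands
abs_mbol = app_mbol - 5 * (np.log10(dist_pc) - 1)   # distance modulus
log_lbol_sun = (4.74 - abs_mbol) / 2.5              # log10(L/Lsun)
print(abs_mbol, log_lbol_sun)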
5,384 | def template_from_file(basedir, path, vars):
from cirruscluster.ext.ansible import utils
realpath = utils.path_dwim(basedir, path)
loader=jinja2.FileSystemLoader([basedir,os.path.dirname(realpath)])
environment = jinja2.Environment(loader=loader, trim_blocks=True)
for filter_plugin in utils.plugins.filter_loader.all():
filters = filter_plugin.filters()
if not isinstance(filters, dict):
raise errors.AnsibleError("FilterModule.filters should return a dict.")
environment.filters.update(filters)
try:
data = codecs.open(realpath, encoding="utf8").read()
except UnicodeDecodeError:
raise errors.AnsibleError("unable to process as utf-8: %s" % realpath)
except:
raise errors.AnsibleError("unable to read %s" % realpath)
if data.startswith(JINJA2_OVERRIDE):
eol = data.find('\n')
line = data[len(JINJA2_OVERRIDE):eol]
data = data[eol+1:]
for pair in line.split(','):
(key,val) = pair.split(':')
setattr(environment,key.strip(),val.strip())
environment.template_class = J2Template
t = environment.from_string(data)
vars = vars.copy()
try:
template_uid = pwd.getpwuid(os.stat(realpath).st_uid).pw_name
except:
template_uid = os.stat(realpath).st_uid
vars['template_host'] = os.uname()[1]
vars['template_path'] = realpath
vars['template_mtime'] = datetime.datetime.fromtimestamp(os.path.getmtime(realpath))
vars['template_uid'] = template_uid
vars['template_fullpath'] = os.path.abspath(realpath)
vars['template_run_date'] = datetime.datetime.now()
managed_default = C.DEFAULT_MANAGED_STR
managed_str = managed_default.format(
host = vars['template_host'],
uid = vars['template_uid'],
file = vars['template_path']
)
vars['ansible_managed'] = time.strftime(managed_str,
time.localtime(os.path.getmtime(realpath)))
res = t.render(vars)  # render step reconstructed: res was undefined in the extracted code
return template(basedir, res, vars) | run a file through the templating engine |
5,385 | def join(self, fm_new, minimal_subset=True):
orig_fields = self._fields[:]
for field in orig_fields:
if not field in fm_new._fields:
if minimal_subset:
self.rm_field(field)
else:
warnings.warn("This option is deprecated. Clean and Filter your data before it is joined.", DeprecationWarning)
fm_new.add_field_like(field, self.field(field))
orig_fields = fm_new._fields[:]
for field in orig_fields:
if not field in self._fields:
if minimal_subset:
fm_new.rm_field(field)
else:
warnings.warn("This option is deprecated. Clean and Filter your data before it is joined.", DeprecationWarning)
self.add_field_like(field, fm_new.field(field))
if 'SUBJECTINDEX' in self._fields[:]:
if fm_new.SUBJECTINDEX[0] in self.SUBJECTINDEX:
fm_new.SUBJECTINDEX[:] = self.SUBJECTINDEX.max()+1
for field in self._fields:
self.__dict__[field] = ma.hstack((self.__dict__[field],
fm_new.__dict__[field]))
self._num_fix += fm_new._num_fix | Adds content of a new Datamat to this Datamat.
If a parameter of the Datamats is not equal or does not exist
in one, it is promoted to a field.
If the two Datamats have different fields then the elements for the
Datamats that did not have the field will be NaN, unless
'minimal_subset' is true, in which case the mismatching fields will
simply be deleted.
Parameters
fm_new : instance of Datamat
This Datamat is added to the current one.
minimal_subset : if true, remove fields which don't exist in both,
instead of using NaNs for missing elements (defaults to False)
Capacity to use superset of fields added by rmuil 2012/01/30 |
5,386 | def fetch_guilds(self, *, limit=100, before=None, after=None):
return GuildIterator(self, limit=limit, before=before, after=after) | |coro|
Retrieves an :class:`.AsyncIterator` that enables receiving your guilds.
.. note::
Using this, you will only receive :attr:`.Guild.owner`, :attr:`.Guild.icon`,
:attr:`.Guild.id`, and :attr:`.Guild.name` per :class:`.Guild`.
.. note::
This method is an API call. For general usage, consider :attr:`guilds` instead.
All parameters are optional.
Parameters
-----------
limit: Optional[:class:`int`]
The number of guilds to retrieve.
If ``None``, it retrieves every guild you have access to. Note, however,
that this would make it a slow operation.
Defaults to 100.
before: :class:`.abc.Snowflake` or :class:`datetime.datetime`
Retrieves guilds before this date or object.
If a date is provided it must be a timezone-naive datetime representing UTC time.
after: :class:`.abc.Snowflake` or :class:`datetime.datetime`
Retrieve guilds after this date or object.
If a date is provided it must be a timezone-naive datetime representing UTC time.
Raises
------
HTTPException
Getting the guilds failed.
Yields
--------
:class:`.Guild`
The guild with the guild data parsed.
Examples
---------
Usage ::
async for guild in client.fetch_guilds(limit=150):
print(guild.name)
Flattening into a list ::
guilds = await client.fetch_guilds(limit=150).flatten()
# guilds is now a list of Guild... |
5,387 | def emflx(self, area, wavelengths=None):
t_lambda = self.tlambda(wavelengths=wavelengths)
if t_lambda == 0:
em_flux = 0.0 * units.FLAM
else:
uresp = self.unit_response(area, wavelengths=wavelengths)
equvw = self.equivwidth(wavelengths=wavelengths).value
em_flux = uresp * equvw / t_lambda
return em_flux | Calculate
:ref:`equivalent monochromatic flux <synphot-formula-emflx>`.
Parameters
----------
area, wavelengths
See :func:`unit_response`.
Returns
-------
em_flux : `~astropy.units.quantity.Quantity`
Equivalent monochromatic flux. |
5,388 | def parse(self, line=None):
args = self.parser.parse_args(args=line)
return args.func(args) | parses the line provided, if None then uses sys.argv |
5,389 | def _gatk_extract_reads_cl(data, region, prep_params, tmp_dir):
args = ["PrintReads",
"-L", region_to_gatk(region),
"-R", dd.get_ref_file(data),
"-I", data["work_bam"]]
if "gatk4" in dd.get_tools_off(data):
args = ["--analysis_type"] + args
runner = broad.runner_from_config(data["config"])
return runner.cl_gatk(args, tmp_dir) | Use GATK to extract reads from full BAM file. |
5,390 | def dataset_merge_method(dataset, other, overwrite_vars, compat, join):
if isinstance(overwrite_vars, str):
overwrite_vars = set([overwrite_vars])
overwrite_vars = set(overwrite_vars)
if not overwrite_vars:
objs = [dataset, other]
priority_arg = None
elif overwrite_vars == set(other):
objs = [dataset, other]
priority_arg = 1
else:
other_overwrite = OrderedDict()
other_no_overwrite = OrderedDict()
for k, v in other.items():
if k in overwrite_vars:
other_overwrite[k] = v
else:
other_no_overwrite[k] = v
objs = [dataset, other_no_overwrite, other_overwrite]
priority_arg = 2
return merge_core(objs, compat, join, priority_arg=priority_arg) | Guts of the Dataset.merge method. |
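The non-trivial part is splitting `other` into overwriting and non-overwriting variables so that the overwriting group can be given merge priority. That partition step in isolation:
from collections import OrderedDict

def partition_by_keys(mapping, keys):
    # Split a mapping into (not-in-keys, in-keys) preserving order, mirroring
    # how overwrite_vars is used to give the overwriting variables priority.
    keep, overwrite = OrderedDict(), OrderedDict()
    for k, v in mapping.items():
        (overwrite if k in keys else keep)[k] = v
    return keep, overwrite

keep, overwrite = partition_by_keys({'a': 1, 'b': 2, 'c': 3}, {'b'})
# keep -> {'a': 1, 'c': 3}, overwrite -> {'b': 2}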
5,391 | def get_section_key_line(self, data, key, opt_extension=):
return super(GoogledocTools, self).get_section_key_line(data, key, opt_extension) | Get the next section line for a given key.
:param data: the data to proceed
:param key: the key
:param opt_extension: an optional extension to delimit the opt value |
5,392 | def set_blink_rate(self, b):
if b > 3:
b = 0
self.firmata.i2c_write(self.board_address,
(self.HT16K33_BLINK_CMD | self.HT16K33_BLINK_DISPLAYON | (b << 1))) | Set the user's desired blink rate (0 - 3)
@param b: blink rate |
5,393 | def git_env(self):
env = dict(os.environ)
for var in ["HOME", "XDG_CONFIG_HOME"]:
env.pop(var, None)
env["GIT_CONFIG_NOSYSTEM"] = "true"
env["GIT_INDEX_FILE"] = os.path.abspath(self.index_file)
return env | Set the index file and prevent git from reading global configs. |
5,394 | def _ParseAndValidateRecord(self, parser_mediator, text_file_object):
try:
title = text_file_object.readline(size=self._MAXIMUM_LINE_SIZE)
url = text_file_object.readline(size=self._MAXIMUM_LINE_SIZE)
timestamp = text_file_object.readline(size=self._MAXIMUM_LINE_SIZE)
popularity_index = text_file_object.readline(size=self._MAXIMUM_LINE_SIZE)
except UnicodeDecodeError:
return False
if len(title) == self._MAXIMUM_LINE_SIZE and title[-1] != '\n':
return False
if len(url) == self._MAXIMUM_LINE_SIZE and url[-1] != '\n':
return False
if len(timestamp) == self._MAXIMUM_LINE_SIZE and timestamp[-1] != '\n':
return False
if (len(popularity_index) == self._MAXIMUM_LINE_SIZE and
popularity_index[-1] != '\n'):
return False
title = title.strip()
url = url.strip()
timestamp = timestamp.strip()
popularity_index = popularity_index.strip()
if not title or not url or not timestamp or not popularity_index:
return False
event_data = OperaGlobalHistoryEventData()
if not self._IsValidUrl(url):
return False
event_data.url = url
if title != url:
event_data.title = title
try:
event_data.popularity_index = int(popularity_index, 10)
timestamp = int(timestamp, 10)
except ValueError:
return False
if event_data.popularity_index < 0:
event_data.description =
else:
event_data.description =
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
parser_mediator.ProduceEventWithEventData(event, event_data)
return True | Parses and validates an Opera global history record.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
text_file_object (dfvfs.TextFile): text file.
Returns:
bool: True if the record was successfully parsed. |
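Stripped of plaso's event plumbing, the record format implied above is four text lines per entry: title, URL, timestamp, popularity index. A minimal standalone validator along those lines (field names and return shape are illustrative):
def parse_opera_record(lines):
    # lines: iterable of 4 text lines -> dict, or None if the record is invalid.
    try:
        title, url, timestamp, popularity = (ln.strip() for ln in lines)
        record = {
            'title': None if title == url else title,
            'url': url,
            'timestamp': int(timestamp, 10),
            'popularity_index': int(popularity, 10),
        }
    except ValueError:
        return None
    if not record['url'] or record['timestamp'] < 0:
        return None
    return record

print(parse_opera_record(['Example\n', 'http://example.com\n', '1501134790\n', '-1\n']))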
5,395 | def open(self, callback, instance=None, processor=None):
assert not self._closed
ws_url = self._client.ws_root
if instance:
ws_url += + instance
if processor:
ws_url += + processor
self._callback = callback
self._websocket = websocket.WebSocketApp(
ws_url,
on_open=self._on_websocket_open,
on_message=self._on_websocket_message,
on_error=self._on_websocket_error,
subprotocols=[],
header=[
.format(k, self._client.session.headers[k])
for k in self._client.session.headers
],
)
self._consumer = threading.Thread(target=self._websocket.run_forever)
self._consumer.daemon = True
self._consumer.start() | Begin consuming messages.
:param string instance: (Optional) instance to use in the WebSocket URL
:param string processor: (Optional) processor to use in the WebSocket URL |
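The consumer pattern used here — running the blocking run_forever() loop of a websocket-client WebSocketApp on a daemon thread — can be sketched on its own; the URL and callback are caller-supplied placeholders rather than Yamcs specifics.
import threading
import websocket  # websocket-client package

def start_consumer(ws_url, on_message):
    # Run the blocking run_forever() loop on a daemon thread so the caller
    # keeps control; ws_url and on_message are supplied by the caller.
    app = websocket.WebSocketApp(ws_url, on_message=on_message)
    consumer = threading.Thread(target=app.run_forever)
    consumer.daemon = True
    consumer.start()
    return app, consumer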
5,396 | def get_cache(self, decorated_function, *args, **kwargs):
self.__check(decorated_function, *args, **kwargs)
if decorated_function in self._storage:
for i in self._storage[decorated_function]:
if i[]() == args[0]:
result = i[].cache_entry(*args, **kwargs)
if self.__statistic is True:
if result.has_value is True:
self.__cache_hit += 1
else:
self.__cache_missed += 1
return result
if self.__statistic is True:
self.__cache_missed += 1
return WCacheStorage.CacheEntry() | :meth:`WCacheStorage.get_cache` method implementation |
5,397 | def _unique_class_name(namespace: Dict[str, Any], uuid: uuid.UUID) -> str:
count = 0
name = original_name = + uuid.hex
while name in namespace:
count += 1
name = original_name + + str(count)
return name | Generate unique to namespace name for a class using uuid.
**Parameters**
:``namespace``: the namespace to verify uniqueness against
:``uuid``: the "unique" portion of the name
**Return Value(s)**
A unique string (in namespace) using uuid. |
5,398 | def __get_git_bin():
git =
alternatives = [
]
for alt in alternatives:
if os.path.exists(alt):
git = alt
break
return git | Get git binary location.
:return: Check git location |
5,399 | def repr_setup(self, name=None, col_names=None, col_types=None):
self._name = name or self._name
self._col_types = col_types or self._col_types | This wasn't safe to pass into init because of the inheritance |