Unnamed: 0 (int64, 0-389k) | code (stringlengths 26-79.6k) | docstring (stringlengths 1-46.9k) |
---|---|---|
4,900 | def run(self):
obj_reaction = self._get_objective()
genes = set()
gene_assoc = {}
for reaction in self._model.reactions:
assoc = None
if reaction.genes is None:
continue
elif isinstance(reaction.genes, string_types):
assoc = boolean.Expression(reaction.genes)
else:
variables = [boolean.Variable(g) for g in reaction.genes]
assoc = boolean.Expression(boolean.And(*variables))
genes.update(v.symbol for v in assoc.variables)
gene_assoc[reaction.id] = assoc
reactions = set(self._mm.reactions)
start_time = time.time()
testing_genes = set(self._args.gene)
deleted_reactions = set()
logger.info(.format(
.join(sorted(testing_genes))))
for reaction in reactions:
if reaction not in gene_assoc:
continue
assoc = gene_assoc[reaction]
if any(boolean.Variable(gene) in assoc.variables
for gene in testing_genes):
new_assoc = assoc.substitute(
lambda v: v if v.symbol not in testing_genes else False)
if new_assoc.has_value() and not new_assoc.value:
logger.info(.format(reaction))
deleted_reactions.add(reaction)
if self._args.method in ['moma', 'moma2']:
solver = self._get_solver(quadratic=True)
else:
solver = self._get_solver()
if self._args.method == 'fba':
logger.info()
prob = fluxanalysis.FluxBalanceProblem(self._mm, solver)
try:
prob.maximize(obj_reaction)
except fluxanalysis.FluxBalanceError as e:
self.report_flux_balance_error(e)
wild = prob.get_flux(obj_reaction)
for reaction in deleted_reactions:
flux_var = prob.get_flux_var(reaction)
prob.prob.add_linear_constraints(flux_var == 0)
prob.maximize(obj_reaction)
deleteflux = prob.get_flux(obj_reaction)
elif self._args.method in ['lin_moma', 'lin_moma2', 'moma', 'moma2']:
prob = moma.MOMAProblem(self._mm, solver)
wt_fluxes = prob.get_minimal_fba_flux(obj_reaction)
wild = wt_fluxes[obj_reaction]
for reaction in deleted_reactions:
flux_var = prob.get_flux_var(reaction)
prob.prob.add_linear_constraints(flux_var == 0)
try:
if self._args.method == 'moma':
logger.info()
prob.moma(wt_fluxes)
elif self._args.method == 'lin_moma':
logger.info()
prob.lin_moma(wt_fluxes)
elif self._args.method == 'moma2':
logger.info()
prob.moma2(obj_reaction, wild)
elif self._args.method == 'lin_moma2':
logger.info()
prob.lin_moma2(obj_reaction, wild)
except moma.MOMAError:
self.fail()
deleteflux = prob.get_flux(obj_reaction)
logger.info(
.format(time.time() - start_time))
logger.info(
.format(
deleteflux + 0))
if wild != 0:
logger.info(
.format(
abs(deleteflux / wild))) | Delete the specified gene and solve using the desired method. |
4,901 | def lipisha_ipn(self):
if not (self.request.POST.get() == LIPISHA_API_KEY and
self.request.POST.get() == LIPISHA_API_SIGNATURE):
raise HTTPBadRequest
return process_lipisha_payment(self.request) | Process lipisha IPN - Initiate/Acknowledge |
4,902 | def process_details():
results = {"argv": sys.argv, "working.directory": os.getcwd()}
for key, method in {
"pid": "getpid",
"ppid": "getppid",
"login": "getlogin",
"uid": "getuid",
"euid": "geteuid",
"gid": "getgid",
"egid": "getegid",
"groups": "getgroups",
}.items():
try:
results[key] = getattr(os, method)()
except (AttributeError, OSError):
results[key] = None
return results | Returns details about the current process |
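A short usage sketch for the row above, assuming `process_details` is importable from the module in question; the `pprint` call and the key selection are illustrative only.

```python
from pprint import pprint

details = process_details()  # assumes the function above is in scope
# keys whose os.* call is missing or fails on this platform come back as None
pprint({k: details[k] for k in ("pid", "ppid", "uid", "groups")})
```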
4,903 | def _proxy(self):
if self._context is None:
self._context = AwsContext(self._version, sid=self._solution[], )
return self._context | Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: AwsContext for this AwsInstance
:rtype: twilio.rest.accounts.v1.credential.aws.AwsContext |
4,904 | def submit(self, password=):
url = .format(BASE_URL)
try:
r = requests.post(url,
data=self.dumps(),
headers={: },
auth=(self[][], password))
response = r.json()
except requests.exceptions.HTTPError as e:
logging.error(.format(e))
return Job()
if in response:
logging.error(.format(response[]))
return Job()
return Job(response) | Submits the participation to the web site.
The passwords is sent as plain text.
:return: the evaluation results. |
4,905 | def save_file(fullpath, entry):
with tempfile.NamedTemporaryFile('w', delete=False) as file:
tmpfile = file.name
for key, val in entry.items():
print('{}: {}'.format(key, str(val)), file=file)
print('', file=file)
file.write(entry.get_payload())
shutil.move(tmpfile, fullpath) | Save a message file out, without mangling the headers |
4,906 | def make_list_table(headers, data, title=, columns=None):
results = []
add = results.append
add( % title)
add()
if columns:
add( % (.join(str(c) for c in columns)))
add()
add( % headers[0])
for h in headers[1:]:
add( % h)
for row in data:
add( % row[0])
for r in row[1:]:
add( % r)
add()
return .join(results) | Build a list-table directive.
:param headers: List of header values.
:param data: Iterable of row data, yielding lists or tuples with rows.
:param title: Optional text to show as the table title.
:param columns: Optional widths for the columns. |
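The literal format strings in the row above were stripped during extraction, so here is a hedged, standalone sketch of the same idea (building an RST `list-table` directive). The directive syntax is filled in from the reStructuredText spec rather than from the original source, and the name `make_list_table_sketch` is a stand-in.

```python
def make_list_table_sketch(headers, data, title='', columns=None):
    """Build an RST list-table directive as a single string."""
    lines = ['.. list-table:: %s' % title, '   :header-rows: 1']
    if columns:
        lines.append('   :widths: %s' % ', '.join(str(c) for c in columns))
    lines.append('')
    lines.append('   * - %s' % headers[0])       # header row
    for h in headers[1:]:
        lines.append('     - %s' % h)
    for row in data:                              # one bullet per data row
        lines.append('   * - %s' % row[0])
        for r in row[1:]:
            lines.append('     - %s' % r)
    lines.append('')
    return '\n'.join(lines)

print(make_list_table_sketch(['Name', 'Value'], [('a', 1), ('b', 2)],
                             title='Example', columns=[20, 10]))
```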
4,907 | def version(versioninfo=False):
contextkey = 'git.version'
contextkey_info = 'git.versioninfo'
if contextkey not in __context__:
try:
version_ = _git_run(['git', '--version'])['stdout']
except CommandExecutionError as exc:
log.error(
,
exc
)
version_ =
try:
__context__[contextkey] = version_.split()[-1]
except IndexError:
log.error(git --version\)
__context__[contextkey] =
if not versioninfo:
return __context__[contextkey]
if contextkey_info not in __context__:
ptr = __context__.setdefault(contextkey_info, [])
for part in __context__[contextkey].split('.'):
try:
ptr.append(int(part))
except ValueError:
ptr.append(part)
return __context__[contextkey_info] | .. versionadded:: 2015.8.0
Returns the version of Git installed on the minion
versioninfo : False
If ``True``, return the version in a versioninfo list (e.g. ``[2, 5,
0]``)
CLI Example:
.. code-block:: bash
salt myminion git.version |
4,908 | def enforce_reset(self):
ub = (self.ubnd * (1.0+self.bound_tol)).to_dict()
lb = (self.lbnd * (1.0 - self.bound_tol)).to_dict()
val_arr = self.values
for iname, name in enumerate(self.columns):
val_arr[val_arr[:,iname] > ub[name],iname] = ub[name]
val_arr[val_arr[:, iname] < lb[name],iname] = lb[name] | enforce parameter bounds on the ensemble by resetting
violating vals to bound |
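A toy numpy illustration of the reset logic described above, with hypothetical per-column bounds; it only mirrors the clipping step, not the pandas ensemble machinery.

```python
import numpy as np

vals = np.array([[0.5, 12.0],
                 [3.0, -4.0]])
ub = {"p1": 2.0, "p2": 10.0}   # hypothetical upper bounds per column
lb = {"p1": 1.0, "p2": 0.0}    # hypothetical lower bounds per column
for i, name in enumerate(["p1", "p2"]):
    col = vals[:, i]                  # view into the array, edits write through
    col[col > ub[name]] = ub[name]    # snap values above the bound back onto it
    col[col < lb[name]] = lb[name]
print(vals)   # [[ 1. 10.], [ 2.  0.]]
```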
4,909 | def _rtg_add_summary_file(eval_files, base_dir, data):
out_file = os.path.join(base_dir, "validate-summary.csv")
if not utils.file_uptodate(out_file, eval_files.get("tp", eval_files.get("fp", eval_files["fn"]))):
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
writer = csv.writer(out_handle)
writer.writerow(["sample", "caller", "vtype", "metric", "value"])
base = _get_sample_and_caller(data)
for metric in ["tp", "fp", "fn"]:
for vtype, bcftools_types in [("SNPs", "--types snps"),
("Indels", "--exclude-types snps")]:
in_file = eval_files.get(metric)
if in_file and os.path.exists(in_file):
cmd = ("bcftools view {bcftools_types} {in_file} | grep -v ^
count = int(subprocess.check_output(cmd.format(**locals()), shell=True))
else:
count = 0
writer.writerow(base + [vtype, metric, count])
eval_files["summary"] = out_file
return eval_files | Parse output TP FP and FN files to generate metrics for plotting. |
4,910 | def plot_lnp(fignum, s, datablock, fpars, direction_type_key):
plot_net(fignum)
dec_key, inc_key, tilt_key = 'dec', 'inc', 'tilt_correction'
if 'dir_dec' in datablock[0].keys():
dec_key, inc_key, tilt_key = 'dir_dec', 'dir_inc', 'dir_tilt_correction'
coord = datablock[0][tilt_key]
title = s
if coord == '-1':
title = title + ": specimen coordinates"
if coord == '0':
title = title + ": geographic coordinates"
if coord == '100':
title = title + ": tilt corrected coordinates"
DIblock, GCblock = [], []
for plotrec in datablock:
if plotrec[direction_type_key] == 'p':
GCblock.append((float(plotrec[dec_key]), float(plotrec[inc_key])))
else:
DIblock.append((float(plotrec[dec_key]), float(plotrec[inc_key])))
if len(DIblock) > 0:
plot_di(fignum, DIblock)
if len(GCblock) > 0:
for pole in GCblock:
plot_circ(fignum, pole, 90., )
x, y = [], []
XY = pmag.dimap(float(fpars["dec"]), float(fpars["inc"]))
x.append(XY[0])
y.append(XY[1])
plt.figure(num=fignum)
plt.scatter(x, y, marker=, s=80, c=)
plt.title(title)
Xcirc, Ycirc = [], []
Da95, Ia95 = pmag.circ(float(fpars["dec"]), float(
fpars["inc"]), float(fpars["alpha95"]))
for k in range(len(Da95)):
XY = pmag.dimap(Da95[k], Ia95[k])
Xcirc.append(XY[0])
Ycirc.append(XY[1])
plt.plot(Xcirc, Ycirc, ) | plots lines and planes on a great circle with alpha 95 and mean
Parameters
_________
fignum : number of plt.figure() object
datablock : nested list of dictionaries with keys in 3.0 or 2.5 format
3.0 keys: dir_dec, dir_inc, dir_tilt_correction = [-1,0,100], direction_type_key =['p','l']
2.5 keys: dec, inc, tilt_correction = [-1,0,100],direction_type_key =['p','l']
fpars : Fisher parameters calculated by, e.g., pmag.dolnp() or pmag.dolnp3_0()
direction_type_key : key for dictionary direction_type ('specimen_direction_type')
Effects
_______
plots the site level figure |
4,911 | def write(series, output, scale=None):
fsamp = int(series.sample_rate.decompose().value)
if scale is None:
scale = 1 / numpy.abs(series.value).max()
data = (series.value * scale).astype('float32')
return wavfile.write(output, fsamp, data) | Write a `TimeSeries` to a WAV file
Parameters
----------
series : `TimeSeries`
the series to write
output : `file`, `str`
the file object or filename to write to
scale : `float`, optional
the factor to apply to scale the data to (-1.0, 1.0),
pass `scale=1` to not apply any scale, otherwise
the data will be auto-scaled
See also
--------
scipy.io.wavfile.write
for details on how the WAV file is actually written
Examples
--------
>>> from gwpy.timeseries import TimeSeries
>>> t = TimeSeries([1, 2, 3, 4, 5])
>>> t.write('test.wav') |
4,912 | def command(
commands: str or list,
prefix: str or list = "/",
separator: str = " ",
case_sensitive: bool = False
):
def func(flt, message):
text = message.text or message.caption
if text:
for p in flt.p:
if text.startswith(p):
s = text.split(flt.s)
c, a = s[0][len(p):], s[1:]
c = c if flt.cs else c.lower()
message.command = ([c] + a) if c in flt.c else None
break
return bool(message.command)
commands = commands if type(commands) is list else [commands]
commands = {c if case_sensitive else c.lower() for c in commands}
prefixes = set(prefix) if prefix else {""}
return create("Command", func=func, c=commands, p=prefixes, s=separator, cs=case_sensitive) | Filter commands, i.e.: text messages starting with "/" or any other custom prefix.
Args:
commands (``str`` | ``list``):
The command or list of commands as string the filter should look for.
Examples: "start", ["start", "help", "settings"]. When a message text containing
a command arrives, the command itself and its arguments will be stored in the *command*
field of the :class:`Message <pyrogram.Message>`.
prefix (``str`` | ``list``, *optional*):
A prefix or a list of prefixes as string the filter should look for.
Defaults to "/" (slash). Examples: ".", "!", ["/", "!", "."].
Can be None or "" (empty string) to allow commands with no prefix at all.
separator (``str``, *optional*):
The command arguments separator. Defaults to " " (white space).
Examples: /start first second, /start-first-second, /start.first.second.
case_sensitive (``bool``, *optional*):
Pass True if you want your command(s) to be case sensitive. Defaults to False.
Examples: when True, command="Start" would trigger /Start but not /start. |
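A standalone sketch of the parsing rule the inner `func` applies, with the pyrogram objects stripped away; the helper name `parse_command` and its default command/prefix sets are hypothetical.

```python
def parse_command(text, commands={"start", "help"}, prefixes={"/", "!"},
                  separator=" ", case_sensitive=False):
    # Mirror of the closure above: strip the prefix, split off arguments,
    # and accept the command only if it is in the registered set.
    for p in prefixes:
        if text.startswith(p):
            parts = text.split(separator)
            cmd, args = parts[0][len(p):], parts[1:]
            cmd = cmd if case_sensitive else cmd.lower()
            return [cmd] + args if cmd in commands else None
    return None

print(parse_command("/start foo bar"))   # ['start', 'foo', 'bar']
print(parse_command("!help"))            # ['help']
print(parse_command("hello"))            # None (no prefix matched)
```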
4,913 | def get_objects_for_subject(subject=None,
object_category=None,
relation=None,
**kwargs):
searchresult = search_associations(subject=subject,
fetch_objects=True,
rows=0,
object_category=object_category,
relation=relation,
**kwargs
)
objs = searchresult['objects']
return objs | Convenience method: Given a subject (e.g. gene, disease, variant), return all associated objects (phenotypes, functions, interacting genes, etc) |
4,914 | def wrap_make_secure_channel(make_secure_channel_func, tracer=None):
def call(*args, **kwargs):
channel = make_secure_channel_func(*args, **kwargs)
try:
host = kwargs.get()
tracer_interceptor = OpenCensusClientInterceptor(tracer, host)
intercepted_channel = grpc.intercept_channel(
channel, tracer_interceptor)
return intercepted_channel
except Exception:
log.warning(
)
return channel
return call | Wrap the google.cloud._helpers.make_secure_channel. |
4,915 | def sample_categorical(prob, rng):
ret = numpy.empty(prob.shape[0], dtype=numpy.float32)
for ind in range(prob.shape[0]):
ret[ind] = numpy.searchsorted(numpy.cumsum(prob[ind]), rng.rand()).clip(min=0.0,
max=prob.shape[
1] - 0.5)
return ret | Sample from independent categorical distributions
Each batch is an independent categorical distribution.
Parameters
----------
prob : numpy.ndarray
Probability of the categorical distribution. Shape --> (batch_num, category_num)
rng : numpy.random.RandomState
Returns
-------
ret : numpy.ndarray
Sampling result. Shape --> (batch_num,) |
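A small numpy check of the inverse-CDF idea used above (cumulative sums plus `searchsorted`), written independently of the MXNet helper; the probabilities and seed are arbitrary.

```python
import numpy as np

rng = np.random.RandomState(0)
prob = np.array([[0.2, 0.5, 0.3],
                 [0.9, 0.05, 0.05]])
# For each row, find where a uniform draw falls in the cumulative distribution.
samples = np.array([np.searchsorted(np.cumsum(p), rng.rand()) for p in prob])
print(samples)   # [1 0] with this seed
```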
4,916 | def from_zip(cls, src=, dest=):
try:
zf = zipfile.ZipFile(src, )
except FileNotFoundError:
raise errors.InvalidPathError(src)
except zipfile.BadZipFile:
raise errors.InvalidZipFileError(src)
[zf.extract(file, dest) for file in zf.namelist()]
zf.close()
return cls.from_path(dest) | Unzips a zipped app project file and instantiates it.
:param src: zipfile path
:param dest: destination folder to extract the zipfile content
Returns
A project instance. |
4,917 | def text_pixels(self, text, clear_screen=True, x=0, y=0, text_color='black', font=None):
if clear_screen:
self.clear()
if font is not None:
if isinstance(font, str):
assert font in fonts.available(), "%s is an invalid font" % font
font = fonts.load(font)
return self.draw.text((x, y), text, fill=text_color, font=font)
else:
return self.draw.text((x, y), text, fill=text_color) | Display `text` starting at pixel (x, y).
The EV3 display is 178x128 pixels
- (0, 0) would be the top left corner of the display
- (89, 64) would be right in the middle of the display
'text_color' : PIL says it supports "common HTML color names". There
are 140 HTML color names listed here that are supported by all modern
browsers. This is probably a good list to start with.
https://www.w3schools.com/colors/colors_names.asp
'font' : can be any font displayed here
http://ev3dev-lang.readthedocs.io/projects/python-ev3dev/en/ev3dev-stretch/display.html#bitmap-fonts
- If font is a string, it is the name of a font to be loaded.
- If font is a Font object, returned from :meth:`ev3dev2.fonts.load`, then it is
used directly. This is desirable for faster display times. |
4,918 | def parse(
args: typing.List[str] = None,
arg_parser: ArgumentParser = None
) -> dict:
parser = arg_parser or create_parser()
return vars(parser.parse_args(args)) | Parses the arguments for the cauldron server |
4,919 | def authenticate(user=None):
if connexion.request.is_json:
user = UserAuth.from_dict(connexion.request.get_json())
credentials = mapUserAuthToCredentials(user)
auth = ApitaxAuthentication.login(credentials)
if(not auth):
return ErrorResponse(status=401, message="Invalid credentials")
access_token = create_access_token(identity={: user.username, : auth[]})
refresh_token = create_refresh_token(identity={: user.username, : auth[]})
return AuthResponse(status=201, message= + user.username + + auth[], access_token=access_token, refresh_token=refresh_token, auth=UserAuth(username=auth[].username, api_token=auth[].token)) | Authenticate
Authenticate with the API # noqa: E501
:param user: The user authentication object.
:type user: dict | bytes
:rtype: UserAuth |
4,920 | def remove_security_group(self, name):
for group in self.security_groups:
if group.isc_name == name:
group.delete() | Remove a security group from container |
4,921 | def pformat_dict_summary_html(dict):
if not dict:
return
html = []
for key, value in sorted(six.iteritems(dict)):
if not isinstance(value, DICT_EXPANDED_TYPES):
value =
html.append(_format_dict_item(key, value))
return mark_safe(u.join(html)) | Briefly print the dictionary keys. |
4,922 | def main():
args = CLI.parse_args(__doc__)
if args[]:
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
logging.basicConfig(level=logging.DEBUG)
if not args[]:
print("No API key given. Please create an API key on <https://octopart.com/api/dashboard>")
return ReturnValues.NO_APIKEY
if args[] == :
engine = PyPartsOctopart(args[], verbose=args[])
elif args[] == :
engine = PyPartsPartsIO(args[], verbose=args[])
else:
engine = PyPartsBase(args[], verbose=args[])
try:
if in args or in args:
return engine.part_search(args[])
elif in args:
return engine.part_specs(args[])
elif in args:
if args[] == :
if args[]:
return engine.part_datasheet(args[], command=args[], path=args[])
else:
return engine.part_datasheet(args[], command=args[])
elif args[] == :
return engine.part_datasheet(args[], path=args[])
elif in args:
return engine.part_show(args[], printout=args[])
except OctopartException as err:
print(err)
return ReturnValues.RUNTIME_ERROR | entry point of the application.
Parses the CLI commands and runs the actions. |
4,923 | def prob_t_profiles(self, profile_pair, multiplicity, t,
return_log=False, ignore_gaps=True):
if t<0:
logP = -ttconf.BIG_NUMBER
else:
Qt = self.expQt(t)
if len(Qt.shape)==3:
res = np.einsum(, profile_pair[1], Qt, profile_pair[0])
else:
res = np.einsum(, profile_pair[1], Qt, profile_pair[0])
if ignore_gaps and (self.gap_index is not None):
non_gap_frac = (1-profile_pair[0][:,self.gap_index])*(1-profile_pair[1][:,self.gap_index])
logP = np.sum(multiplicity*np.log(res)*non_gap_frac)
else:
logP = np.sum(multiplicity*np.log(res))
return logP if return_log else np.exp(logP) | Calculate the probability of observing a node pair at a distance t
Parameters
----------
profile_pair: numpy arrays
Probability distributions of the nucleotides at either
end of the branch. pp[0] = parent, pp[1] = child
multiplicity : numpy array
The number of times an alignment pattern is observed
t : float
Length of the branch separating parent and child
ignore_gaps: bool
If True, ignore mutations to and from gaps in distance calculations
return_log : bool
Whether or not to exponentiate the result |
4,924 | def normalize_lcdict_byinst(
lcdict,
magcols='all',
normto='zero',
normkeylist=('stf', 'ccd', 'flt', 'fld', 'prj', 'exp'),
debugmode=False,
quiet=False
):
stfccdfltfldprjexpallallzerojmaghmagkmagbmagvmagsdssgsdssrsdssizeroobjectinfoera
if in lcdict and len(lcdict[]) > 0:
if not quiet:
LOGWARNING(
)
return lcdict
normkeycols = []
availablenormkeys = []
for key in normkeylist:
if key in lcdict and lcdict[key] is not None:
normkeycols.append(lcdict[key])
availablenormkeys.append(key)
normkeycols = list(zip(*normkeycols))
allkeys = [repr(x) for x in normkeycols]
allkeys = [a.replace(,).replace(,).replace(" lcapertureslcaperturesobjectinfolcaperturesobjectinfoobjectinfolcaperturesaim_%saim_%sarm_%sarm_%saep_%saep_%satf_%satf_%spsimpsrmpseppstfirm_%sirm_%siep_%siep_%sitf_%sitf_%sallredmagspsrmpsrmepdmagspseppseptfamagspstfpstfepdtfapseppseppstfpstf,t all nan.
thismagsize = thismags[thisind].size
thismagfinite = np.where(np.isfinite(thismags[thisind]))[0].size
if thismagsize > 2 and thismagfinite > 2:
medmag = np.nanmedian(thismags[thisind])
lcdict[col][thisind] = lcdict[col][thisind] - medmag
if debugmode:
LOGDEBUG(
%
(col, nkey, len(thismags[thisind]), medmag))
return lcdict | This is a function to normalize light curves across all instrument
combinations present.
Use this to normalize a light curve containing a variety of:
- HAT station IDs ('stf')
- camera IDs ('ccd')
- filters ('flt')
- observed field names ('fld')
- HAT project IDs ('prj')
- exposure times ('exp')
Parameters
----------
lcdict : dict
The input lcdict to process.
magcols : 'all' or list of str
If this is 'all', all of the columns in the lcdict that are indicated to
be magnitude measurement columns are normalized. If this is a list of
str, must contain the keys of the lcdict specifying which magnitude
columns will be normalized.
normto : {'zero', 'jmag', 'hmag', 'kmag', 'bmag', 'vmag', 'sdssg', 'sdssr', 'sdssi'}
This indicates which column will be the normalization target. If this is
'zero', will normalize to 0.0 for each LC column. Otherwise, will
normalize to the value of one of the other keys in the
lcdict['objectinfo'][magkey], meaning the normalization will be to some
form of catalog magnitude.
normkeylist : list of str
These are the column keys to use to form the normalization
index. Measurements in the specified `magcols` with identical
normalization index values will be considered as part of a single
measurement 'era', and will be normalized to zero. Once all eras have
been normalized this way, the final light curve will be re-normalized as
specified in `normto`.
debugmode : bool
If True, will indicate progress as time-groups are found and processed.
quiet : bool
If True, will not emit any messages when processing.
Returns
-------
dict
Returns the lcdict with the magnitude measurements normalized as
specified. The normalization happens IN PLACE. |
4,925 | def get_event_timelines(self, event_ids, session=None, lightweight=None):
url = % (self.url, )
params = {
: .join(str(x) for x in event_ids),
: ,
: ,
:
}
(response, elapsed_time) = self.request(params=params, session=session, url=url)
return self.process_response(response, resources.EventTimeline, elapsed_time, lightweight) | Returns a list of event timelines based on event id's
supplied.
:param list event_ids: List of event id's to return
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.EventTimeline] |
4,926 | def make_if_statement(instr, queue, stack, context):
test_expr = make_expr(stack)
if isinstance(instr, instrs.POP_JUMP_IF_TRUE):
test_expr = ast.UnaryOp(op=ast.Not(), operand=test_expr)
first_block = popwhile(op.is_not(instr.arg), queue, side=)
if isinstance(first_block[-1], instrs.RETURN_VALUE):
body = instrs_to_body(first_block, context)
return ast.If(test=test_expr, body=body, orelse=[])
jump_to_end = expect(
first_block.pop(), instrs.JUMP_FORWARD, "at end of if-block"
)
body = instrs_to_body(first_block, context)
end = jump_to_end.arg
if instr.arg is jump_to_end.arg:
orelse = []
else:
orelse = instrs_to_body(
popwhile(op.is_not(end), queue, side=),
context,
)
return ast.If(test=test_expr, body=body, orelse=orelse) | Make an ast.If block from a POP_JUMP_IF_TRUE or POP_JUMP_IF_FALSE. |
4,927 | def write_file(filename, contents):
contents = "\n".join(contents)
contents = contents.encode("utf-8")
with open(filename, "wb") as f:
f.write(contents) | Create a file with the specified name and write 'contents' (a
sequence of strings without line terminators) to it. |
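A minimal usage sketch, assuming `write_file` is in scope; the filename is hypothetical.

```python
write_file("example.txt", ["first line", "second line"])
with open("example.txt", "rb") as f:
    print(f.read())   # b'first line\nsecond line' (terminators added by the join)
```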
4,928 | def provides(self):
plist = self.metadata.provides
s = % (self.name, self.version)
if s not in plist:
plist.append(s)
return plist | A set of distribution names and versions provided by this distribution.
:return: A set of "name (version)" strings. |
4,929 | def page_models(self, constructor, paging, constraints=None, *, columns=None, order_by=None):
records, count = self.page(constructor.table_name, paging, constraints, columns=columns,
order_by=order_by)
return ([constructor(r) for r in records], count) | Specialization of DataAccess.page that returns models instead of cursor objects. |
4,930 | def insert(self, packet, **kwargs):
values = [ ]
pd = packet._defn
for defn in pd.fields:
val = getattr(packet.raw, defn.name)
if val is None and defn.name in pd.history:
val = getattr(packet.history, defn.name)
values.append(val)
qmark = [] * len(values)
sql = % (pd.name, .join(qmark))
self._conn.execute(sql, values) | Insert a packet into the database
Arguments
packet
The :class:`ait.core.tlm.Packet` instance to insert into
the database |
4,931 | def _get_db_version(self):
dbname = self._cfg.get(, )
self._execute("SELECT description FROM pg_shdescription JOIN pg_database ON objoid = pg_database.oid WHERE datname = " % dbname)
comment = self._curs_pg.fetchone()
if comment is None:
raise NipapDatabaseNoVersionError("Could not find comment of psql database %s" % dbname)
db_version = None
m = re.match(, comment[0])
if m:
db_version = int(m.group(1))
else:
raise NipapError("Could not match schema version database comment")
return db_version | Get the schema version of the nipap psql db. |
4,932 | def slot_availability_array(events, slots):
array = np.ones((len(events), len(slots)))
for row, event in enumerate(events):
for col, slot in enumerate(slots):
if slot in event.unavailability or event.duration > slot.duration:
array[row, col] = 0
return array | Return a numpy array mapping events to slots
- Rows corresponds to events
- Columns correspond to stags
Array has value 0 if event cannot be scheduled in a given slot
(1 otherwise) |
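A runnable sketch of the availability matrix, assuming the function above is in scope; the `Event`/`Slot` namedtuples are stand-ins carrying only the fields the function reads (`duration`, `unavailability`).

```python
from collections import namedtuple

Event = namedtuple("Event", ["duration", "unavailability"])
Slot = namedtuple("Slot", ["duration"])

slots = [Slot(30), Slot(60)]
events = [Event(45, []),          # too long for the 30-minute slot
          Event(30, [slots[0]])]  # explicitly unavailable in slot 0

print(slot_availability_array(events, slots))
# [[0. 1.]
#  [0. 1.]]
```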
4,933 | def build_text_part(name, thread, struct):
part_w = None
width = None
minw = 0
maxw = None
width_tuple = struct[]
if width_tuple is not None:
if width_tuple[0] == :
minw, maxw = width_tuple[1:]
content = prepare_string(name, thread, maxw)
if minw:
alignment = struct[]
if alignment == :
content = content.ljust(minw)
elif alignment == :
content = content.center(minw)
else:
content = content.rjust(minw)
text = urwid.Text(content, wrap=)
width = text.pack()[0]
part_w = AttrFlipWidget(text, struct)
return width, part_w | create an urwid.Text widget (wrapped in approproate Attributes)
to display a plain text parts in a threadline.
create an urwid.Columns widget (wrapped in approproate Attributes)
to display a list of tag strings, as part of a threadline.
:param name: id of part to build
:type name: str
:param thread: the thread to get local info for
:type thread: :class:`alot.db.thread.Thread`
:param struct: theming attributes for this part, as provided by
:class:`alot.settings.theme.Theme.get_threadline_theming`
:type struct: dict
:return: overall width (in characters) and a widget.
:rtype: tuple[int, AttrFliwWidget] |
4,934 | def mul(mean1, var1, mean2, var2):
mean = (var1*mean2 + var2*mean1) / (var1 + var2)
var = 1 / (1/var1 + 1/var2)
return (mean, var) | Multiply Gaussian (mean1, var1) with (mean2, var2) and return the
results as a tuple (mean, var).
Strictly speaking the product of two Gaussian PDFs is a Gaussian
function, not Gaussian PDF. It is, however, proportional to a Gaussian
PDF, so it is safe to treat the output as a PDF for any filter using
Bayes equation, which normalizes the result anyway.
Parameters
----------
mean1 : scalar
mean of first Gaussian
var1 : scalar
variance of first Gaussian
mean2 : scalar
mean of second Gaussian
var2 : scalar
variance of second Gaussian
Returns
-------
mean : scalar
mean of product
var : scalar
variance of product
Examples
--------
>>> mul(1, 2, 3, 4)
(1.6666666666666667, 1.3333333333333333)
References
----------
Bromily. "Products and Convolutions of Gaussian Probability Functions",
Tina Memo No. 2003-003.
http://www.tina-vision.net/docs/memos/2003-003.pdf |
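A quick arithmetic check of the product formulas against the docstring example `mul(1, 2, 3, 4)`.

```python
# mean = (var1*mean2 + var2*mean1) / (var1 + var2) = (2*3 + 4*1) / (2 + 4) = 10/6
# var  = 1 / (1/var1 + 1/var2)                     = 1 / (1/2 + 1/4)       = 4/3
mean = (2 * 3 + 4 * 1) / (2 + 4)
var = 1 / (1 / 2 + 1 / 4)
print(mean, var)   # 1.6666666666666667 1.3333333333333333
```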
4,935 | def EnumerateFilesystemsFromClient(args):
del args
for drive in win32api.GetLogicalDriveStrings().split("\x00"):
if not drive:
continue
try:
volume = win32file.GetVolumeNameForVolumeMountPoint(drive).rstrip("\\")
label, _, _, _, fs_type = win32api.GetVolumeInformation(drive)
except win32api.error:
continue
yield rdf_client_fs.Filesystem(
device=volume,
mount_point="/%s:/" % drive[0],
type=fs_type,
label=UnicodeFromCodePage(label)) | List all local filesystems mounted on this system. |
4,936 | def DecodeValueFromAttribute(self, attribute_name, value, ts):
try:
attribute = Attribute.PREDICATES[attribute_name]
cls = attribute.attribute_type
self._AddAttributeToCache(attribute, LazyDecoder(cls, value, ts),
self.synced_attributes)
except KeyError:
pass
except (ValueError, rdfvalue.DecodeError):
logging.debug("%s: %s invalid encoding. Skipping.", self.urn,
attribute_name) | Given a serialized value, decode the attribute.
Only attributes which have been previously defined are permitted.
Args:
attribute_name: The string name of the attribute.
value: The serialized attribute value.
ts: The timestamp of this attribute. |
4,937 | def fill_tree_from_xml(tag, ar_tree, namespace):
for child in tag:
name_elem = child.find( + namespace + )
if name_elem is not None and child is not None:
fill_tree_from_xml(child, ar_tree.append_child(name_elem.text, child), namespace)
if name_elem is None and child is not None:
fill_tree_from_xml(child, ar_tree, namespace) | Parse the xml tree into ArTree objects. |
4,938 | def tournament_selection(random, population, args):
num_selected = args.setdefault('num_selected', 1)
tournament_size = args.setdefault('tournament_size', 2)
if tournament_size > len(population):
tournament_size = len(population)
selected = []
for _ in range(num_selected):
tourn = random.sample(population, tournament_size)
selected.append(max(tourn))
return selected | Return a tournament sampling of individuals from the population.
This function selects ``num_selected`` individuals from the population.
It selects each one by using random sampling without replacement
to pull ``tournament_size`` individuals and adds the best of the
tournament as its selection. If ``tournament_size`` is greater than
the population size, the population size is used instead as the size
of the tournament.
.. Arguments:
random -- the random number generator object
population -- the population of individuals
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *num_selected* -- the number of individuals to be selected (default 1)
- *tournament_size* -- the tournament size (default 2) |
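A hypothetical standalone run of the selector, assuming `tournament_selection` is in scope; a plain list of integers stands in for the population, since the function only needs comparable individuals.

```python
import random

rng = random.Random(42)
population = [3, 1, 4, 1, 5, 9, 2, 6]
args = {"num_selected": 3, "tournament_size": 4}
winners = tournament_selection(rng, population, args)
print(winners)   # three tournament winners drawn from the list
```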
4,939 | def delete_service(self, service_name, params=None):
if not self.space.has_service_with_name(service_name):
logging.warning("Service not found so... succeeded?")
return True
guid = self.get_instance_guid(service_name)
logging.info("Deleting service %s with guid %s" % (service_name, guid))
return self.api.delete("/v2/service_instances/%s?accepts_incomplete=true" %
(guid), params=params) | Delete the service of the given name. It may fail if there are
any service keys or app bindings. Use purge() if you want
to delete it all. |
4,940 | def suggest_next_locations(self, context = None, pending_X = None, ignored_X = None):
self.model_parameters_iterations = None
self.num_acquisitions = 0
self.context = context
self._update_model(self.normalization_type)
suggested_locations = self._compute_next_evaluations(pending_zipped_X = pending_X, ignored_zipped_X = ignored_X)
return suggested_locations | Run a single optimization step and return the next locations to evaluate the objective.
Number of suggested locations equals to batch_size.
:param context: fixes specified variables to a particular context (values) for the optimization run (default, None).
:param pending_X: matrix of input configurations that are in a pending state (i.e., do not have an evaluation yet) (default, None).
:param ignored_X: matrix of input configurations that the user black-lists, i.e., those configurations will not be suggested again (default, None). |
4,941 | def sojourn_time(p):
p = np.asarray(p)
pii = p.diagonal()
if not (1 - pii).all():
print("Sojourn times are infinite for absorbing states!")
return 1 / (1 - pii) | Calculate sojourn time based on a given transition probability matrix.
Parameters
----------
p : array
(k, k), a Markov transition probability matrix.
Returns
-------
: array
(k, ), sojourn times. Each element is the expected time a Markov
chain spends in each states before leaving that state.
Notes
-----
Refer to :cite:`Ibe2009` for more details on sojourn times for Markov
chains.
Examples
--------
>>> from giddy.markov import sojourn_time
>>> import numpy as np
>>> p = np.array([[.5, .25, .25], [.5, 0, .5], [.25, .25, .5]])
>>> sojourn_time(p)
array([2., 1., 2.]) |
4,942 | def set_breakpoint(self, file_name, line_number, condition=None, enabled=True):
c_file_name = self.canonic(file_name)
import linecache
line = linecache.getline(c_file_name, line_number)
if not line:
return "Line %s:%d does not exist." % (c_file_name, line_number), None
bp = IKBreakpoint(c_file_name, line_number, condition, enabled)
if self.pending_stop or IKBreakpoint.any_active_breakpoint:
self.enable_tracing()
else:
self.disable_tracing()
return None, bp.number | Create a breakpoint, register it in the class's lists and returns
a tuple of (error_message, break_number) |
4,943 | def setup(self, app):
self.logger = app.logger
self.shell.logger = self.logger
if not self.command_name:
raise EmptyCommandNameException()
self.app = app
self.arguments_declaration = self.arguments
self.arguments = app.arguments
if self.use_subconfig:
_init_config(self)
else:
self.config = self.app.config | Setup properties from parent app on the command |
4,944 | def pre_init(self, value, obj):
try:
if obj._state.adding:
pass
except AttributeError:
pass
return value | Convert a string value to JSON only if it needs to be deserialized.
SubfieldBase metaclass has been modified to call this method instead of
to_python so that we can check the obj state and determine if it needs to be
deserialized |
4,945 | def bounds(self):
bounds = [np.inf,-np.inf, np.inf,-np.inf, np.inf,-np.inf]
def update_bounds(ax, nb, bounds):
if nb[2*ax] < bounds[2*ax]:
bounds[2*ax] = nb[2*ax]
if nb[2*ax+1] > bounds[2*ax+1]:
bounds[2*ax+1] = nb[2*ax+1]
return bounds
for i in range(self.n_blocks):
try:
bnds = self[i].GetBounds()
for a in range(3):
bounds = update_bounds(a, bnds, bounds)
except AttributeError:
pass
return bounds | Finds min/max for bounds across blocks
Returns:
tuple(float):
length 6 tuple of floats containing min/max along each axis |
4,946 | def prior_from_config(cp, variable_params, prior_section,
constraint_section):
logging.info("Setting up priors for each parameter")
dists = distributions.read_distributions_from_config(cp, prior_section)
constraints = distributions.read_constraints_from_config(
cp, constraint_section)
return distributions.JointDistribution(variable_params, *dists,
constraints=constraints) | Gets arguments and keyword arguments from a config file.
Parameters
----------
cp : WorkflowConfigParser
Config file parser to read.
variable_params : list
List of of model parameter names.
prior_section : str
Section to read prior(s) from.
constraint_section : str
Section to read constraint(s) from.
Returns
-------
pycbc.distributions.JointDistribution
The prior. |
4,947 | def WriteSerialized(cls, attribute_container):
json_dict = cls.WriteSerializedDict(attribute_container)
return json.dumps(json_dict) | Writes an attribute container to serialized form.
Args:
attribute_container (AttributeContainer): attribute container.
Returns:
str: A JSON string containing the serialized form. |
4,948 | def packageipa(env, console):
ipa_path, app_path = _get_ipa(env)
output_dir = path.dirname(ipa_path)
if path.exists(ipa_path):
console.quiet( % ipa_path)
os.remove(ipa_path)
zf = zipfile.ZipFile(ipa_path, mode='w')
payload_dir = 'Payload'
for (dirpath, dirnames, filenames) in os.walk(app_path):
for filename in filenames:
filepath = path.join(dirpath, filename)
prefix = path.commonprefix([filepath, path.dirname(app_path)])
write_path = path.join(payload_dir, filepath[len(prefix) + 1:])
console.quiet( % write_path)
zf.write(filepath, write_path)
zf.close()
console.quiet( % ipa_path) | Package the built app as an ipa for distribution in iOS App Store |
4,949 | def _default_output_dir():
try:
dataset_name = gin.query_parameter("inputs.dataset_name")
except ValueError:
dataset_name = "random"
dir_name = "{model_name}_{dataset_name}_{timestamp}".format(
model_name=gin.query_parameter("train.model").configurable.name,
dataset_name=dataset_name,
timestamp=datetime.datetime.now().strftime("%Y%m%d_%H%M"),
)
dir_path = os.path.join("~", "trax", dir_name)
print()
trax.log("No --output_dir specified")
return dir_path | Default output directory. |
4,950 | def thaicheck(word: str) -> bool:
pattern = re.compile(r"[ก-ฬฮ]", re.U)
res = re.findall(pattern, word)
if res == []:
return False
if _check1(res[len(res) - 1]) or len(res) == 1:
if _check2(word):
word2 = list(word)
i = 0
thai = True
if word in [
"ฆ่า",
"เฆี่ยน",
"ศึก",
"ศอก",
"เศิก",
"เศร้า",
"ธ",
"ณ",
"ฯพณฯ",
"ใหญ่",
"หญ้า",
"ควาย",
"ความ",
"กริ่งเกรง",
"ผลิ",
]:
return True
while i < len(word2) and thai:
thai = _check3(word2[i])
if not thai:
return False
i += 1
return True
return False
if word in ["กะ", "กระ", "ปะ", "ประ"]:
return True
return False | Check if a word is an "authentic Thai word"
:param str word: word
:return: True or False |
4,951 | def _do_connect(self):
self.load_system_host_keys()
if self.username is None or self.port is None:
self._configure()
try:
self.connect(hostname=self.hostname,
port=self.port,
username=self.username,
key_filename=self.key_filename,
sock=self.proxy)
except socket.error as e:
raise GerritError("Failed to connect to server: %s" % e)
try:
version_string = self._transport.remote_version
pattern = re.compile(r)
self.remote_version = _extract_version(version_string, pattern)
except AttributeError:
self.remote_version = None | Connect to the remote. |
4,952 | def _unsign_data(self, data, options):
if options[] not in self.signature_algorithms:
raise Exception(
% options[])
signature_algorithm = \
self.signature_algorithms[options[]]
algorithm = self._get_algorithm_info(signature_algorithm)
key_salt =
if algorithm[]:
key_salt = data[-algorithm[]:]
data = data[:-algorithm[]]
key = self._generate_key(options[],
self.signature_passphrases, key_salt, algorithm)
data = self._decode(data, algorithm, key)
return data | Verify and remove signature |
4,953 | def node_list_to_coordinate_lines(G, node_list, use_geom=True):
edge_nodes = list(zip(node_list[:-1], node_list[1:]))
lines = []
for u, v in edge_nodes:
data = min(G.get_edge_data(u, v).values(), key=lambda x: x['length'])
if 'geometry' in data and use_geom:
xs, ys = data['geometry'].xy
lines.append(list(zip(xs, ys)))
else:
x1, y1 = G.nodes[u]['x'], G.nodes[u]['y']
x2, y2 = G.nodes[v]['x'], G.nodes[v]['y']
line = [(x1, y1), (x2, y2)]
lines.append(line)
return lines | Given a list of nodes, return a list of lines that together follow the path
defined by the list of nodes.
Parameters
----------
G : networkx multidigraph
route : list
the route as a list of nodes
use_geom : bool
if True, use the spatial geometry attribute of the edges to draw
geographically accurate edges, rather than just lines straight from node
to node
Returns
-------
lines : list of lines given as pairs ( (x_start, y_start), (x_stop, y_stop) ) |
4,954 | def with_timeout(timeout, d, reactor=reactor):
if timeout is None or not isinstance(d, Deferred):
return d
ret = Deferred(canceller=lambda _: (
d.cancel(),
timeout_d.cancel(),
))
timeout_d = sleep(timeout, reactor)
timeout_d.addCallback(lambda _: (
d.cancel(),
ret.errback(Failure(Timeout())) if not ret.called else None,
))
timeout_d.addErrback(lambda f: f.trap(CancelledError))
d.addCallback(lambda result: (
timeout_d.cancel(),
ret.callback(result),
))
d.addErrback(lambda f: (
if_(not f.check(CancelledError), lambda: (
timeout_d.cancel(),
ret.errback(f),
)),
))
return ret | Returns a `Deferred` that is in all respects equivalent to `d`, e.g. when `cancel()` is called on it `Deferred`,
the wrapped `Deferred` will also be cancelled; however, a `Timeout` will be fired after the `timeout` number of
seconds if `d` has not fired by that time.
When a `Timeout` is raised, `d` will be cancelled. It is up to the caller to worry about how `d` handles
cancellation, i.e. whether it has full/true support for cancelling, or does cancelling it just prevent its callbacks
from being fired but doesn't cancel the underlying operation. |
4,955 | def reset(self):
self.config = None
self.html = None
self.parsed_tree = None
self.tidied = False
self.next_page_link = None
self.title = None
self.author = set()
self.language = None
self.date = None
self.body = None
self.failures = set()
self.success = False
LOGGER.debug(u) | (re)set all instance attributes to default.
Every attribute is set to ``None``, except :attr:`author`
and :attr:`failures` which are set to ``[]``. |
4,956 | async def getiter(self, url: str, url_vars: Dict[str, str] = {},
*, accept: str = sansio.accept_format(),
jwt: Opt[str] = None,
oauth_token: Opt[str] = None
) -> AsyncGenerator[Any, None]:
data, more = await self._make_request("GET", url, url_vars, b"", accept,
jwt=jwt, oauth_token=oauth_token)
if isinstance(data, dict) and "items" in data:
data = data["items"]
for item in data:
yield item
if more:
async for item in self.getiter(more, url_vars, accept=accept,
jwt=jwt, oauth_token=oauth_token):
yield item | Return an async iterable for all the items at a specified endpoint. |
4,957 | def serialize(ad_objects, output_format=, indent=2, attributes_only=False):
if attributes_only:
ad_objects = [key for key in sorted(ad_objects[0].keys())]
if output_format == 'json':
return json.dumps(ad_objects, indent=indent, ensure_ascii=False, sort_keys=True)
elif output_format == 'yaml':
return yaml.dump(sorted(ad_objects), indent=indent) | Serialize the object to the specified format
:param ad_objects list: A list of ADObjects to serialize
:param output_format str: The output format, json or yaml. Defaults to json
:param indent int: The number of spaces to indent, defaults to 2
:param attributes only: Only serialize the attributes found in the first record of the list
of ADObjects
:return: A serialized, formatted representation of the list of ADObjects
:rtype: str |
4,958 | def linear_trend_timewise(x, param):
ix = x.index
times_seconds = (ix - ix[0]).total_seconds()
times_hours = np.asarray(times_seconds / float(3600))
linReg = linregress(times_hours, x.values)
return [("attr_\"{}\"".format(config["attr"]), getattr(linReg, config["attr"]))
for config in param] | Calculate a linear least-squares regression for the values of the time series versus the sequence from 0 to
length of the time series minus one.
This feature uses the index of the time series to fit the model, which must be of a datetime
dtype.
The parameters control which of the characteristics are returned.
Possible extracted attributes are "pvalue", "rvalue", "intercept", "slope", "stderr", see the documentation of
linregress for more information.
:param x: the time series to calculate the feature of. The index must be datetime.
:type x: pandas.Series
:param param: contains dictionaries {"attr": x} with x an string, the attribute name of the regression model
:type param: list
:return: the different feature values
:return type: list |
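A hedged usage sketch, assuming `linear_trend_timewise` is importable; the hourly index makes the expected slope exactly 1.0 per hour.

```python
import pandas as pd

idx = pd.date_range("2021-01-01", periods=6, freq="H")
x = pd.Series([0.0, 1.0, 2.0, 3.0, 4.0, 5.0], index=idx)
param = [{"attr": "slope"}, {"attr": "rvalue"}]
print(linear_trend_timewise(x, param))
# [('attr_"slope"', 1.0), ('attr_"rvalue"', 1.0)]
```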
4,959 | def __build_cmd_maps(cls):
cmd_map_all = {}
cmd_map_visible = {}
cmd_map_internal = {}
for name in dir(cls):
obj = getattr(cls, name)
if iscommand(obj):
for cmd in getcommands(obj):
if cmd in cmd_map_all.keys():
raise PyShellError("The command already has cmd"
" method , cannot register a"
" second method .".format( \
cmd, cmd_map_all[cmd], obj.__name__))
cmd_map_all[cmd] = obj.__name__
if isvisiblecommand(obj):
cmd_map_visible[cmd] = obj.__name__
if isinternalcommand(obj):
cmd_map_internal[cmd] = obj.__name__
return cmd_map_all, cmd_map_visible, cmd_map_internal | Build the mapping from command names to method names.
One command name maps to at most one method.
Multiple command names can map to the same method.
Only used by __init__() to initialize self._cmd_map. MUST NOT be used
elsewhere.
Returns:
A tuple (cmd_map, hidden_cmd_map, internal_cmd_map). |
4,960 | def load(self, filename, params=None, force=False, depthrange=None, timerange=None, output_is_dict=True, **kwargs):
if (params is not None) & isinstance(params,str): params=[params]
self._filename = filename
try:
ncf = ncfile(self._filename, "r")
except Exception,e:
warn(repr(e),stacklevel=2)
return {}
akeys = ncf.ncattrs()
attrStr=OrderedDict()
for A in akeys : attrStr.update({A:ncf.getncattr(A)})
dum = ncf.variables.keys()
nparam = np.shape(dum)[0]
par_list = np.array([.format(v) for v in ncf.variables.keys()])
par_list = par_list.compress([len(par) != 0 for par in par_list])
nparam = par_list.size
if nparam == 0 : self.Error(.format(self._filename))
ncdimlist = np.array([.format(d) for d in ncf.dimensions.keys()])
ndims = len(ncdimlist)
dimStr = OrderedDict()
dimStr.update({:ndims})
if ndims == 0 : self.Error(.format(self._filename))
checkedDims = np.array([, , , ])
existDim = -np.ones(4,dtype=int)
if not self.use_local_dims :
for i,d in enumerate(ncdimlist) :
if ( (d.lower().startswith()) | (d.lower().find() != -1) ) & (d.find() ==-1) : existDim[0]=i
if ( (d.lower().startswith()) | (d.lower().find() != -1) ) & (d.find() ==-1): existDim[1]=i
if (d.lower().startswith()) | (d.lower().startswith()) : existDim[2]=i
if (d.lower().startswith()) | (d.lower().startswith()) : existDim[3]=i
identified = existDim > -1
for i,d in enumerate(existDim) :
if identified[i] :
dimStr.update({ncdimlist[d]:len(ncf.dimensions[ncdimlist[d]])})
cmd = \
self.message(4, .format(checkedDims[i],cmd))
locals()[checkedDims[i]]=load_ncVar(ncdimlist[d], nc=ncf,**kwargs)
missdims=set(ncdimlist)
missdims.difference_update(ncdimlist[existDim[identified]])
missdims=list(missdims)
for i,d in enumerate(missdims) :
dimStr.update({d:len(ncf.dimensions[d])})
if ncf.variables.has_key(d) :
cmd = \
self.message(4, .format(d,cmd))
locals()[d]=load_ncVar(d, nc=ncf,**kwargs)
else :
self.message(1, .format(d))
ndim=len(ncf.dimensions[d])
cmd =
self.message(4, .format(d,cmd))
locals()[d]={:{:1,d:ndim}, :np.arange(ndim)}
dimlist=ncdimlist.copy()
if identified.sum() > 0 : dimlist[existDim[identified]]=checkedDims[identified]
else : dimlist = dimlist[[]]
if params is not None :
if force : par_list = [i.upper() for i in params]
else :par_list = list(set(params).intersection(par_list))
else : par_list = par_list.tolist()
for d in ncdimlist[existDim[identified]] :
par_list.pop(par_list.index(d))
self.message(2, + str(nparam) + + str(par_list))
if (existDim[0] > -1) & (existDim[1] > -1):
llind, flag = in_limits(lon[],lat[], limit=self.limit)
if isinstance(flag,tuple) :
lon[] = recale(lon[].compress(flag[0]),degrees=True)
lon[][lon[].keys()[1]] = flag[0].sum()
lat[] = lat[].compress(flag[1])
lat[][lat[].keys()[1]] = flag[1].sum()
else :
lon[] = recale(lon[].compress(flag),degrees=True)
lon[][lon[].keys()[1]] = flag.sum()
lat[] = lat[].compress(flag)
lat[][lat[].keys()[1]] = flag.sum()
locals()[ncdimlist[existDim[0]]]=lon.copy()
locals()[ncdimlist[existDim[1]]]=lat.copy()
dimStr.update({ncdimlist[existDim[0]]:len(lon[])})
dimStr.update({ncdimlist[existDim[1]]:len(lat[])})
if (existDim[2] > -1):
if (timerange is not None) : timeflag = (time[] >= np.min(timerange)) & (time[] <= np.max(timerange))
else : timeflag = np.ones(len(time[]), dtype=bool)
if timeflag.sum() == 0 : self.Error(.format(np.min(time), np.max(time)))
time[] = time[].compress(timeflag)
time[][time[].keys()[1]] = timeflag.sum()
locals()[ncdimlist[existDim[2]]]=time.copy()
dimStr.update({ncdimlist[existDim[2]]:len(time[])})
if (existDim[3] > -1):
if (depthrange is not None) : depthflag = (depth[] >= np.min(depthrange)) & (depth[] <= np.max(depthrange))
else : depthflag = np.ones(len(depth[]), dtype=bool)
if depthflag.sum() == 0 : self.Error(.format(np.min(depth), np.max(depth)))
depth[] = depth[].compress(depthflag)
depth[][depth[].keys()[1]] = depthflag.sum()
locals()[ncdimlist[existDim[3]]]=depth.copy()
dimStr.update({ncdimlist[existDim[3]]:len(depth[])})
outStr = OrderedDict()
outStr.update({:dimStr})
outStr.update({:attrStr})
if (existDim[0] > -1) : outStr.update({ncdimlist[existDim[0]]:lon})
if (existDim[1] > -1) : outStr.update({ncdimlist[existDim[1]]:lat})
if (existDim[2] > -1) : outStr.update({ncdimlist[existDim[2]]:time})
if (existDim[3] > -1) : outStr.update({ncdimlist[existDim[3]]:depth})
for d in dimlist.compress([not outStr.has_key(f) for f in dimlist]) :
cmd = \+d+
self.message(4, +cmd)
exec(cmd)
ncdimStr=outStr.copy()
shape=()
for d in dimlist: shape += np.shape(locals()[d][])
ndims = np.size(shape)
for d, ncd in zip(*(dimlist,ncdimlist)):
if not kwargs.has_key(ncd) :
if kwargs.has_key(d) :
kwargs.update({ncd:kwargs[d]})
del kwargs[d]
else :
dvar=ncdimStr[d][]
if isinstance(dvar,np.ma.masked_array) : kwargs.update({ncd:(np.nanmin(dvar.data),np.nanmax(dvar.data))})
else : kwargs.update({ncd:(np.nanmin(dvar),np.nanmax(dvar))})
for param in par_list :
dumVar = load_ncVar(param, nc=ncf, **kwargs)
cmd = \
self.message(4, + cmd)
exec(cmd)
outStr.update(dumStr)
for ddum in dumStr[param][].keys()[1:] :
if outStr[].get(ddum) != dumStr[param][][ddum] : outStr[][ddum]=dumStr[param][][ddum]
ncf.close()
return outStr | NetCDF data loader
:parameter filename: file name
:parameter params: a list of variables to load (default : load ALL variables).
:parameter depthrange: if a depth dimension is found, subset along this dimension.
:parameter timerange: if a time dimension is found, subset along this dimension.
.. note:: using :attr:`altimetry.tools.nctools.limit` allows subsetting to a given region.
:parameter kwargs: additional arguments for subsetting along given dimensions.
.. note:: You can index along any dimension by providing the name of the dimensions to subsample along. Values associated to the provided keywords should be a length 2 or 3 tuple (min,max,<step>) (cf. :func:`altimetry.data.nctools.load_ncVar`).
:keyword output_is_dict: data structures are dictionnaries (eg. my_hydro_data.variable['data']). If false uses an object with attributes (eg. my_hydro_data.variable.data).
:return {type:dict} outStr: Output data structure containing all recorded parameters as specificied by NetCDF file PARAMETER list.
:author: Renaud Dussurget |
4,961 | def minor_releases(self, manager):
return [
key for key, value in six.iteritems(manager)
if any(x for x in value if not x.startswith())
] | Return all minor release line labels found in ``manager``. |
4,962 | def splitter(structured):
actives = []
decoys = []
for mol in structured:
status = mol[3]
if status == '1':
actives.append(mol)
elif status == '0':
decoys.append(mol)
return actives, decoys | Separates structured data into a list of actives or a list of decoys. actives are labeled with a '1' in their status
fields, while decoys are labeled with a '0' in their status fields.
:param structured: either roc_structure or score_structure.
roc_structure: list [(id, best_score, best_query, status, fpf, tpf), ..., ]
score_structure: list [(id, best_score, best_query, status, net decoy count, net active count), ...,]
:return: actives: list [(id, best_score, best_query, status = 1, fpf/net decoy count, tpf/net active count), ..., ]
:return decoys: list [(id, best_score, best_query, status = 0, fpf/net decoy count, tpf/net active count), ..., ] |
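A small worked example, assuming `splitter` is in scope and that the status field is the string '1' for actives and '0' for decoys as the docstring states; the molecule tuples are invented.

```python
structured = [
    ("mol_a", 0.91, "q1", '1', 0, 1),   # active
    ("mol_b", 0.42, "q2", '0', 1, 1),   # decoy
    ("mol_c", 0.87, "q1", '1', 1, 2),   # active
]
actives, decoys = splitter(structured)
print(len(actives), len(decoys))   # 2 1
```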
4,963 | def scope(self, *args, **kwargs):
_scopes = self.scopes(*args, **kwargs)
if len(_scopes) == 0:
raise NotFoundError("No scope fits criteria")
if len(_scopes) != 1:
raise MultipleFoundError("Multiple scopes fit criteria")
return _scopes[0] | Return a single scope based on the provided name.
If additional `keyword=value` arguments are provided, these are added to the request parameters. Please
refer to the documentation of the KE-chain API for additional query parameters.
:return: a single :class:`models.Scope`
:raises NotFoundError: When no `Scope` is found
:raises MultipleFoundError: When more than a single `Scope` is found |
4,964 | def get_sub_extractors_by_property(extractor, property_name, return_property_list=False):
if isinstance(extractor, RecordingExtractor):
if property_name not in extractor.get_channel_property_names():
raise ValueError(" must be must be a property of the recording channels")
else:
sub_list = []
recording = extractor
properties = np.array([recording.get_channel_property(chan, property_name)
for chan in recording.get_channel_ids()])
prop_list = np.unique(properties)
for prop in prop_list:
prop_idx = np.where(prop == properties)
chan_idx = list(np.array(recording.get_channel_ids())[prop_idx])
sub_list.append(SubRecordingExtractor(recording, channel_ids=chan_idx))
if return_property_list:
return sub_list, prop_list
else:
return sub_list
elif isinstance(extractor, SortingExtractor):
if property_name not in extractor.get_unit_property_names():
raise ValueError(" must be must be a property of the units")
else:
sub_list = []
sorting = extractor
properties = np.array([sorting.get_unit_property(unit, property_name)
for unit in sorting.get_unit_ids()])
prop_list = np.unique(properties)
for prop in prop_list:
prop_idx = np.where(prop == properties)
unit_idx = list(np.array(sorting.get_unit_ids())[prop_idx])
sub_list.append(SubSortingExtractor(sorting, unit_ids=unit_idx))
if return_property_list:
return sub_list, prop_list
else:
return sub_list
else:
raise ValueError(" must be a RecordingExtractor or a SortingExtractor") | Divides Recording or Sorting Extractor based on the property_name (e.g. group)
Parameters
----------
extractor: RecordingExtractor or SortingExtractor
The extractor to be subdivided in subextractors
property_name: str
The property used to subdivide the extractor
return_property_list: bool
If True the property list is returned
Returns
-------
List of subextractors |
4,965 | def processRequest(self, arg, **kw):
if self.debug:
log.msg( %str(arg), debug=1)
if arg is None:
return
for h in self.handlers:
arg = h.processRequest(arg, **kw)
s = str(arg)
if self.debug:
log.msg(s, debug=1)
return s | Parameters:
arg -- XML Soap data string |
4,966 | def pop(self, count=1):
if count < 0:
return self.popleft(-count)
new_right_list, new_left_list = PDeque._pop_lists(self._right_list, self._left_list, count)
return PDeque(new_left_list, new_right_list, max(self._length - count, 0), self._maxlen) | Return new deque with rightmost element removed. Popping the empty queue
will return the empty queue. A optional count can be given to indicate the
number of elements to pop. Popping with a negative index is the same as
popleft. Executes in amortized O(k) where k is the number of elements to pop.
>>> pdeque([1, 2]).pop()
pdeque([1])
>>> pdeque([1, 2]).pop(2)
pdeque([])
>>> pdeque([1, 2]).pop(-1)
pdeque([2]) |
4,967 | def GetZipInfo(self):
if not self._zip_info:
location = getattr(self.path_spec, 'location', None)
if location is None:
raise errors.PathSpecError()
if not location.startswith(self._file_system.LOCATION_ROOT):
raise errors.PathSpecError()
if len(location) == 1:
return None
zip_file = self._file_system.GetZipFile()
try:
self._zip_info = zip_file.getinfo(location[1:])
except KeyError:
pass
return self._zip_info | Retrieves the ZIP info object.
Returns:
zipfile.ZipInfo: a ZIP info object or None if not available.
Raises:
PathSpecError: if the path specification is incorrect. |
4,968 | def check_url (self):
try:
url_data = self.urlqueue.get(timeout=QUEUE_POLL_INTERVALL_SECS)
if url_data is not None:
try:
self.check_url_data(url_data)
finally:
self.urlqueue.task_done(url_data)
self.setName(self.origname)
except urlqueue.Empty:
pass
except Exception:
self.internal_error() | Try to get URL data from queue and check it. |
4,969 | def get_sequences_from_cluster(c1, c2, data):
seqs1 = data[c1][]
seqs2 = data[c2][]
seqs = list(set(seqs1 + seqs2))
names = []
for s in seqs:
if s in seqs1 and s in seqs2:
names.append("both")
elif s in seqs1:
names.append(c1)
else:
names.append(c2)
return seqs, names | get all sequences from on cluster |
4,970 | def render(self, fname=):
import qnet.visualization.circuit_pyx as circuit_visualization
from tempfile import gettempdir
from time import time, sleep
if not fname:
tmp_dir = gettempdir()
fname = os.path.join(tmp_dir, "tmp_{}.png".format(hash(time)))
if circuit_visualization.draw_circuit(self, fname):
done = False
for k in range(20):
if os.path.exists(fname):
done = True
break
else:
sleep(.5)
if done:
return fname
raise CannotVisualize() | Render the circuit expression and store the result in a file
Args:
fname (str): Path to an image file to store the result in.
Returns:
str: The path to the image file |
4,971 | def _local_to_shape(self, local_x, local_y):
return (
local_x - self.shape_offset_x,
local_y - self.shape_offset_y
) | Translate local coordinates point to shape coordinates.
Shape coordinates have the same unit as local coordinates, but are
offset such that the origin of the shape coordinate system (0, 0) is
located at the top-left corner of the shape bounding box. |
4,972 | def modis_filename2modisdate(modis_fname):
if not isinstance(modis_fname,list) : modis_fname=[modis_fname]
return [os.path.splitext(os.path.basename(m))[0][1:12] for m in modis_fname] | #+
# MODIS_FILENAME2DATE : Convert MODIS file name to MODIS date
#
# @author: Renaud DUSSURGET (LER PAC/IFREMER)
# @history: Created by RD on 29/10/2012
#
#- |
4,973 | def windyields(self, ini, end, delta, **keyw):
if ("tmass" in keyw) == False:
keyw["tmass"] = "mass"
if ("abund" in keyw) == False:
keyw["abund"] = "iso_massf"
if ("cycle" in keyw) == False:
keyw["cycle"] = "cycle"
print("Windyields() initialised. Reading files...")
ypsinit = []
niso = 0
X_i = []
E_i = []
totalmass = []
ypssurf = []
cycles = []
first = True
wc = self._windcalc
cycleret = self.se.cycles
retrieve = self.se.get
capp = cycles.extend
tapp = totalmass.extend
yapp = ypssurf.extend
for i in range(ini,end+1,delta):
step = int(i)
capp([int(cycleret[i-ini])])
tapp([retrieve(step,keyw["tmass"])])
yapp([retrieve(step,keyw["abund"])])
print("Reading complete. Calculating yields and ejected masses...")
nsteps = len(cycles)-1
niso = len(ypssurf[0])
X_i = np.zeros([niso], float)
E_i = np.zeros([niso], float)
X_i, E_i = wc(first, totalmass, nsteps, niso, ypssurf, \
ypsinit, X_i, E_i, cycles)
return X_i, E_i | This function returns the wind yields and ejected masses.
X_i, E_i = data.windyields(ini, end, delta)
Parameters
----------
ini : integer
The starting cycle.
end : integer
The finishing cycle.
delta : integer
The cycle interval.
keyw : dict
A dict of key word arguments.
Returns
-------
list
The function returns a list of the wind yields(X_i) and
a list of the ejected masses(E_i) in the mass units that
were used (usually solar masses).
Notes
-----
The following keywords can also be used:
+------------------+---------------+
| Keyword Argument | Default Value |
+==================+===============+
| abund | "iso_massf" |
+------------------+---------------+
| tmass | "mass" |
+------------------+---------------+
| cycle | "cycle" |
+------------------+---------------+
The keyword arguments are used when the variables within the
input file differ in name from their default values typically
found in an MPPNP output file. If the data table differs in
name, use these keywords. For example, if the table for the
abundances is called "abundances" instead of "iso_massf", then
use abund = "abundances" as a keyword argument. |
4,974 | def create_organisation(self, organisation_json):
return trolly.organisation.Organisation(
trello_client=self,
organisation_id=organisation_json['id'],
name=organisation_json['name'],
data=organisation_json,
) | Create an Organisation object from a JSON object
Returns:
Organisation: The organisation from the given `organisation_json`. |
4,975 | def on_peer_down(self, peer):
LOG.debug(,
peer.ip_address, peer.version_num)
self._table_manager.clean_stale_routes(peer) | Peer down handler.
Cleans up the paths in global tables that were received from this peer. |
4,976 | def main():
src_dir = sys.argv[1]
os.chdir(src_dir)
config = get_config(src_dir)
cmd =
version = \
subprocess.check_output([cmd], shell=True).strip()
tmp_dist = "/var/deb_dist"
project = config[]
tmp_dist = "/var/deb_dist"
os_version = "1404"
deb_dir = "%s/deb_dist" % config[]
print("Building %s debian packages..." % project)
shutil.copyfile("%s/stdeb.cfg" % (deb_dir), "./stdeb.cfg")
shutil.copytree(deb_dir, tmp_dist)
cmd = + \
% tmp_dist
print(subprocess.check_output([cmd], shell=True))
os.chdir("%s/%s-%s" % (tmp_dist, project, version))
cmd = .split()
subprocess.check_output(cmd)
os.chdir(src_dir)
pkg = "python-%s_%s-1" % (project, version)
os_pkg = pkg + "_%s_all.deb" % os_version
pkg = pkg + "_all.deb"
shutil.copyfile("%s/%s" % (tmp_dist, pkg), "%s/%s" % (deb_dir, os_pkg))
cmd = "python %s/add_pkg_name.py deb_pkg %s/%s" % \
(config[], deb_dir, os_pkg) | main
Entrypoint to this script. This will execute the functionality as a standalone
element |
4,977 | def key_expand(self, key):
key = self._process_value(key, )
payload = {"key": key}
resp = self.call('key_expand', payload)
return resp | Derive public key and account number from **private key**
:param key: Private key to generate account and public key of
:type key: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.key_expand(
key="781186FB9EF17DB6E3D1056550D9FAE5D5BBADA6A6BC370E4CBB938B1DC71DA3"
)
{
"private": "781186FB9EF17DB6E3D1056550D9FAE5D5BBADA6A6BC370E4CBB938B1DC71DA3",
"public": "3068BB1CA04525BB0E416C485FE6A67FD52540227D267CC8B6E8DA958A7FA039",
"account": "xrb_1e5aqegc1jb7qe964u4adzmcezyo6o146zb8hm6dft8tkp79za3sxwjym5rx"
} |
4,978 | def binary_report(self, sha256sum, apikey):
url = self.base_url + "file/report"
params = {"apikey": apikey, "resource": sha256sum}
rate_limit_clear = self.rate_limit()
if rate_limit_clear:
response = requests.post(url, data=params)
if response.status_code == self.HTTP_OK:
json_response = response.json()
response_code = json_response['response_code']
return json_response
elif response.status_code == self.HTTP_RATE_EXCEEDED:
time.sleep(20)
else:
self.logger.warning("retrieve report: %s, HTTP code: %d", os.path.basename(filename), response.status_code) | retrieve report from file scan |
4,979 | def save(self):
if not self.dirty:
return
data = '\n'.join(map(self.make_relative, self.paths))
if data:
log.debug("Saving %s", self.filename)
data = (
"import sys; sys.__plen = len(sys.path)\n"
"%s\n"
"import sys; new=sys.path[sys.__plen:];"
" del sys.path[sys.__plen:];"
" p=getattr(sys,'__egginsert',0); sys.path[p:p]=new;"
" sys.__egginsert = p+len(new)\n"
) % data
if os.path.islink(self.filename):
os.unlink(self.filename)
f = open(self.filename, 'wt')
f.write(data)
f.close()
elif os.path.exists(self.filename):
log.debug("Deleting empty %s", self.filename)
os.unlink(self.filename)
self.dirty = False | Write changed .pth file back to disk |
4,980 | def encrypt(self, txt, key):
assert isinstance(txt, six.text_type), "txt: %s is not text type!" % repr(txt)
assert isinstance(key, six.text_type), "key: %s is not text type!" % repr(key)
if len(txt) != len(key):
raise SecureJSLoginError("encrypt error: %s and %s must have the same length!" % (txt, key))
pbkdf2_hash = PBKDF2SHA1Hasher1().get_salt_hash(txt)
txt=force_bytes(txt)
key=force_bytes(key)
crypted = self.xor(txt, key)
crypted = binascii.hexlify(crypted)
crypted = six.text_type(crypted, "ascii")
return "%s$%s" % (pbkdf2_hash, crypted) | XOR ciphering with a PBKDF2 checksum |
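A minimal sketch of the XOR step that self.xor is assumed to perform (an assumption about its behaviour, not the project's code):
import binascii
def xor(data, key):
    # byte-wise XOR of two equal-length byte strings
    return bytes(a ^ b for a, b in zip(data, key))
crypted = binascii.hexlify(xor(b"secret", b"k3yk3y"))
print(crypted)  # b'18561a19560d'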
4,981 | def gct2gctx_main(args):
in_gctoo = parse_gct.parse(args.filename, convert_neg_666=False)
if args.output_filepath is None:
basename = os.path.basename(args.filename)
out_name = os.path.splitext(basename)[0] + ".gctx"
else:
out_name = args.output_filepath
if args.row_annot_path is None:
pass
else:
row_metadata = pd.read_csv(args.row_annot_path, sep=, index_col=0, header=0, low_memory=False)
assert all(in_gctoo.data_df.index.isin(row_metadata.index)), \
"Row ids in matrix missing from annotations file"
in_gctoo.row_metadata_df = row_metadata.loc[row_metadata.index.isin(in_gctoo.data_df.index)]
if args.col_annot_path is None:
pass
else:
col_metadata = pd.read_csv(args.col_annot_path, sep=, index_col=0, header=0, low_memory=False)
assert all(in_gctoo.data_df.columns.isin(col_metadata.index)), \
"Column ids in matrix missing from annotations file"
in_gctoo.col_metadata_df = col_metadata.loc[col_metadata.index.isin(in_gctoo.data_df.columns)]
write_gctx.write(in_gctoo, out_name) | Separate from main() in order to make command-line tool. |
4,982 | def patch_namespaced_role(self, name, namespace, body, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_role_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_role_with_http_info(name, namespace, body, **kwargs)
return data | partially update the specified Role
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_role(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Role (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1Role
If the method is called asynchronously,
returns the request thread. |
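A hedged usage sketch with the official Kubernetes Python client; it assumes a reachable cluster and an existing Role named pod-reader, and the patch body is purely illustrative:
from kubernetes import client, config
config.load_kube_config()  # or config.load_incluster_config() inside a pod
rbac = client.RbacAuthorizationV1Api()
patch = {"rules": [{"apiGroups": [""], "resources": ["pods"], "verbs": ["get", "list"]}]}
role = rbac.patch_namespaced_role(name="pod-reader", namespace="default", body=patch)
print(role.metadata.name)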
4,983 | def query(self):
query = (
self.book.session.query(Account)
.join(Commodity)
.filter(Commodity.namespace != "template")
.filter(Account.type != AccountType.root.value)
)
return query | Main accounts query |
4,984 | def _premis_version_from_data(data):
for child in data:
if isinstance(child, dict):
version = child.get("version")
if version:
return version
return utils.PREMIS_VERSION | Given tuple ``data`` encoding a PREMIS element, attempt to return the
PREMIS version it is using. If none can be found, return the default PREMIS
version. |
4,985 | def msg_curse(self, args=None, max_width=None):
ret = []
if not self.stats or self.is_disable():
return ret
name_max_width = max_width - 12
msg = .format(, width=name_max_width)
ret.append(self.curse_add_line(msg, "TITLE"))
for i in self.stats:
if i[] == and i[] == []:
continue
ret.append(self.curse_new_line())
msg = .format(i["label"][:name_max_width],
width=name_max_width)
ret.append(self.curse_add_line(msg))
if i[] in (b, b, b, b):
msg = .format(i[])
ret.append(self.curse_add_line(
msg, self.get_views(item=i[self.get_key()],
key=,
option=)))
else:
if (args.fahrenheit and i[] != and
i[] != ):
value = to_fahrenheit(i[])
unit =
else:
value = i[]
unit = i[]
try:
msg = .format(value, unit)
ret.append(self.curse_add_line(
msg, self.get_views(item=i[self.get_key()],
key=,
option=)))
except (TypeError, ValueError):
pass
return ret | Return the dict to display in the curse interface. |
4,986 | def write(self):
if not self.output_files.valid():
raise ValueError(
f
)
else:
if in env.config[] or in env.config[]:
env.log_to_file(
,
f
)
ret = super(RuntimeInfo, self).write()
if ret is False:
env.logger.debug(f)
return ret
send_message_to_controller([, self.sig_id, ret])
send_message_to_controller([
, , self.sig_id,
repr({
: [
str(f.resolve())
for f in self.input_files
if isinstance(f, file_target)
],
: [
str(f.resolve())
for f in self.dependent_files
if isinstance(f, file_target)
],
: [
str(f.resolve())
for f in self.output_files
if isinstance(f, file_target)
]
})
])
return True | Write signature file with signature of script, input, output and dependent files.
Because local input and output files can only be determined after the execution
of the workflow, they are not part of the construction. |
4,987 | def save(self, *args, **kwargs):
after_save = kwargs.pop('after_save', True)
super(LayerExternal, self).save(*args, **kwargs)
if after_save:
try:
synchronizer = self.synchronizer
except ImproperlyConfigured:
pass
else:
if synchronizer:
synchronizer.after_external_layer_saved(self.config)
self._reload_schema() | call synchronizer "after_external_layer_saved" method
for any additional operation that must be executed after save |
4,988 | def saelgv(vec1, vec2):
vec1 = stypes.toDoubleVector(vec1)
vec2 = stypes.toDoubleVector(vec2)
smajor = stypes.emptyDoubleVector(3)
sminor = stypes.emptyDoubleVector(3)
libspice.saelgv_c(vec1, vec2, smajor, sminor)
return stypes.cVectorToPython(smajor), stypes.cVectorToPython(sminor) | Find semi-axis vectors of an ellipse generated by two arbitrary
three-dimensional vectors.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/saelgv_c.html
:param vec1: First vector used to generate an ellipse.
:type vec1: 3-Element Array of floats
:param vec2: Second vector used to generate an ellipse.
:type vec2: 3-Element Array of floats
:return: Semi-major axis of ellipse, Semi-minor axis of ellipse.
:rtype: tuple |
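A hedged usage sketch via SpiceyPy (saelgv is pure geometry, so no kernels are needed; the generating vectors are arbitrary examples):
import spiceypy as spice
vec1 = [1.0, 1.0, 1.0]
vec2 = [1.0, -1.0, 1.0]
smajor, sminor = spice.saelgv(vec1, vec2)
print(smajor, sminor)  # semi-major and semi-minor axis vectors of the ellipse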
4,989 | def stateDict(self):
state = {
: self._duration,
: self._intensity,
: self._risefall,
: self.name
}
return state | Saves internal values to be loaded later
:returns: dict -- {'parametername': value, ...} |
4,990 | def hash_array(vals, encoding=, hash_key=None, categorize=True):
if not hasattr(vals, 'dtype'):
raise TypeError("must pass a ndarray-like")
dtype = vals.dtype
if hash_key is None:
hash_key = _default_hash_key
if np.issubdtype(dtype, np.complex128):
return hash_array(vals.real) + 23 * hash_array(vals.imag)
elif isinstance(dtype, np.bool):
vals = vals.astype('u8')
elif issubclass(dtype.type, (np.datetime64, np.timedelta64)):
vals = vals.view('i8').astype('u8', copy=False)
elif issubclass(dtype.type, np.number) and dtype.itemsize <= 8:
vals = vals.view('u{}'.format(vals.dtype.itemsize)).astype('u8')
else:
if categorize:
from pandas import factorize, Categorical, Index
codes, categories = factorize(vals, sort=False)
cat = Categorical(codes, Index(categories),
ordered=False, fastpath=True)
return _hash_categorical(cat, encoding, hash_key)
try:
vals = hashing.hash_object_array(vals, hash_key, encoding)
except TypeError:
vals = hashing.hash_object_array(vals.astype(str).astype(object),
hash_key, encoding)
vals ^= vals >> 30
vals *= np.uint64(0xbf58476d1ce4e5b9)
vals ^= vals >> 27
vals *= np.uint64(0x94d049bb133111eb)
vals ^= vals >> 31
return vals | Given a 1d array, return an array of deterministic integers.
.. versionadded:: 0.19.2
Parameters
----------
vals : ndarray, Categorical
encoding : string, default 'utf8'
encoding for data & key when strings
hash_key : string key to encode, default to _default_hash_key
categorize : bool, default True
Whether to first categorize object arrays before hashing. This is more
efficient when the array contains duplicate values.
.. versionadded:: 0.20.0
Returns
-------
1d uint64 numpy array of hash values, same length as the vals |
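A hedged usage sketch; in released pandas this helper is exposed as pandas.util.hash_array, and the exact values depend on the hash key:
import numpy as np
from pandas.util import hash_array
vals = np.array([1, 2, 3], dtype=np.int64)
print(hash_array(vals))  # deterministic uint64 hashes, one per element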
4,991 | def _start_again(self, message=None):
logging.debug("Start again message delivered: {}".format(message))
the_answer = self._get_text_answer()
return "{0} The correct answer was {1}. Please start a new game.".format(
message,
the_answer
) | Simple method to form a start again message and give the answer in readable form. |
4,992 | def seqToKV(seq, strict=False):
def err(msg):
formatted = % (msg, seq)
if strict:
raise KVFormError(formatted)
else:
logging.warning(formatted)
lines = []
for k, v in seq:
if isinstance(k, bytes):
k = k.decode()
elif not isinstance(k, str):
err( % k)
k = str(k)
if in k:
raise KVFormError(
% (k, ))
if in k:
raise KVFormError(
% (k, ))
if k.strip() != k:
err( % (k, ))
if isinstance(v, bytes):
v = v.decode()
elif not isinstance(v, str):
err( % (v, ))
v = str(v)
if in v:
raise KVFormError(
%
(v, ))
if v.strip() != v:
err( % (v, ))
lines.append(k + ':' + v + '\n')
return ''.join(lines).encode('utf-8') | Represent a sequence of pairs of strings as newline-terminated
key:value pairs. The pairs are generated in the order given.
@param seq: The pairs
@type seq: [(str, (unicode|str))]
@return: A string representation of the sequence
@rtype: bytes |
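A small sketch of the expected key:value encoding, inferred from the docstring rather than the library's exact error handling:
pairs = [("mode", "id_res"), ("identity", "http://example.com/")]
encoded = "".join("%s:%s\n" % (k, v) for k, v in pairs).encode()
print(encoded)  # b'mode:id_res\nidentity:http://example.com/\n'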
4,993 | def load(patterns, full_reindex):
header()
for pattern in patterns:
for filename in iglob(pattern):
echo(.format(white(filename)))
with open(filename) as f:
reader = csv.reader(f)
reader.next()
for idx, row in enumerate(reader, 1):
try:
advice = csv.from_row(row)
skipped = False
if not full_reindex:
index(advice)
echo( if idx % 50 else white(idx), nl=False)
except Exception:
echo(cyan() if idx % 50 else white(.format(idx)), nl=False)
skipped = True
if skipped:
echo(white(.format(idx)) if idx % 50 else )
else:
echo(white(idx) if idx % 50 else )
success(.format(idx))
if full_reindex:
reindex() | Load one or more CADA CSV files matching patterns |
4,994 | def rotation(f, line = 'fast'):
if f.unstructured:
raise ValueError("Rotation requires a structured file")
lines = { 'fast': f.fast,
'slow': f.slow,
'iline': f.iline,
'xline': f.xline,
}
if line not in lines:
error = "Unknown line {}".format(line)
solution = "Must be any of: {}".format(.join(lines.keys()))
raise ValueError(.format(error, solution))
l = lines[line]
origin = f.header[0][segyio.su.cdpx, segyio.su.cdpy]
cdpx, cdpy = origin[segyio.su.cdpx], origin[segyio.su.cdpy]
rot = f.xfd.rotation( len(l),
l.stride,
len(f.offsets),
np.fromiter(l.keys(), dtype = np.intc) )
return rot, cdpx, cdpy | Find rotation of the survey
Find the clock-wise rotation and origin of `line` as ``(rot, cdpx, cdpy)``
The clock-wise rotation is defined as the angle in radians between line
given by the first and last trace of the first line and the axis that gives
increasing CDP-Y, in the direction that gives increasing CDP-X.
By default, the first line is the 'fast' direction, which is inlines if the
file is inline sorted, and crossline if it's crossline sorted.
Parameters
----------
f : SegyFile
line : { 'fast', 'slow', 'iline', 'xline' }
Returns
-------
rotation : float
cdpx : int
cdpy : int
Notes
-----
.. versionadded:: 1.2 |
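A hedged usage sketch with segyio; 'survey.sgy' is a placeholder path for an existing structured SEG-Y file:
import segyio
with segyio.open("survey.sgy") as f:
    rot, cdpx, cdpy = segyio.tools.rotation(f, line="fast")
    print(rot, cdpx, cdpy)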
4,995 | def _add_https(self, q):
registryhttporiginalhttp:registryhttp://%sregistryoriginalhttps:registryhttps://%sregistrys environment
else:
prefix =
nohttps = os.environ.get()
if nohttps != None:
prefix =
q[] = %(prefix, q[])
return q | for push, pull, and other api interactions, the user can optionally
define a custom registry. If the registry name doesn't include http
or https, add it.
Parameters
==========
q: the parsed image query (names), including the original |
4,996 | def list_virtual_machine_scale_set_vm_network_interfaces(scale_set,
vm_index,
resource_group,
**kwargs):
result = {}
netconn = __utils__[](, **kwargs)
try:
nics = __utils__[](
netconn.network_interfaces.list_virtual_machine_scale_set_vm_network_interfaces(
virtual_machine_scale_set_name=scale_set,
virtualmachine_index=vm_index,
resource_group_name=resource_group
)
)
for nic in nics:
result[nic[]] = nic
except CloudError as exc:
__utils__[](, str(exc), **kwargs)
result = {: str(exc)}
return result | .. versionadded:: 2019.2.0
Get information about all network interfaces in a specific virtual machine within a scale set.
:param scale_set: The name of the scale set to query.
:param vm_index: The virtual machine index.
:param resource_group: The resource group name assigned to the
scale set.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.list_virtual_machine_scale_set_vm_network_interfaces testset testvm testgroup |
4,997 | def remove(self, component):
with self.__lock:
factory = self.__names.pop(component)
components = self.__queue[factory]
del components[component]
if not components:
del self.__queue[factory]
try:
with use_ipopo(self.__context) as ipopo:
ipopo.kill(component)
except (BundleException, ValueError):
pass | Kills/Removes the component with the given name
:param component: A component name
:raise KeyError: Unknown component |
4,998 | def _get_nets_lacnic(self, *args, **kwargs):
from warnings import warn
warn(
)
return self.get_nets_lacnic(*args, **kwargs) | Deprecated. This will be removed in a future release. |
4,999 | def increase_route_count(self, crawled_request):
for route in self.__routing_options.routes:
if re.compile(route).match(crawled_request.url):
count_key = str(route) + crawled_request.method
if count_key in self.__routing_count.keys():
self.__routing_count[count_key] += 1
else:
self.__routing_count[count_key] = 1
break | Increase the count that determines how many times a URL of a certain route has been crawled.
Args:
crawled_request (:class:`nyawc.http.Request`): The request that possibly matches a route. |
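A minimal standalone sketch of the counting idea; the route regex, URL, and method are made up for illustration:
import re
routing_count = {}
routes = [r"https?://example\.com/articles/[0-9]+"]
url, method = "https://example.com/articles/42", "GET"
for route in routes:
    if re.compile(route).match(url):
        key = route + method
        routing_count[key] = routing_count.get(key, 0) + 1
        break
print(routing_count)  # tally keyed by route regex plus HTTP method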