Unnamed: 0 | code | docstring |
---|---|---|
5,500 | def indentLine(self, block, autoIndent):
indent = None
if indent is None:
indent = self.tryMatchedAnchor(block, autoIndent)
if indent is None:
indent = self.tryCComment(block)
if indent is None and not autoIndent:
indent = self.tryCppComment(block)
if indent is None:
indent = self.trySwitchStatement(block)
if indent is None:
indent = self.tryAccessModifiers(block)
if indent is None:
indent = self.tryBrace(block)
if indent is None:
indent = self.tryCKeywords(block, block.text().lstrip().startswith())
if indent is None:
indent = self.tryCondition(block)
if indent is None:
indent = self.tryStatement(block)
if indent is not None:
return indent
else:
dbg("Nothing matched")
return self._prevNonEmptyBlockIndent(block) | Indent line.
Return filler or null. |
5,501 | def severity_level(self, value):
if value == self._defaults[] and in self._values:
del self._values[]
else:
self._values[] = value | The severity_level property.
Args:
value (int): the property value. |
5,502 | def getBlocks(sentences, n):
blocks = []
for i in range(0, len(sentences), n):
blocks.append(sentences[i:(i+n)])
return blocks | Get blocks of n sentences together.
:param sentences: List of strings where each string is a sentence.
:type sentences: list
:param n: Maximum blocksize for sentences, i.e. a block will be composed of
``n`` sentences.
:type n: int.
:returns: Blocks of n sentences.
:rtype: list-of-lists
.. code-block:: python
import rnlp
example = "Hello there. How are you? I am fine."
sentences = rnlp.getSentences(example)
# ['Hello there', 'How are you', 'I am fine']
blocks = rnlp.getBlocks(sentences, 2)
# with 1: [['Hello there'], ['How are you'], ['I am fine']]
# with 2: [['Hello there', 'How are you'], ['I am fine']]
# with 3: [['Hello there', 'How are you', 'I am fine']] |
5,503 | def create_groups(self, container):
content = self._serialize.body(container, )
response = self._send(http_method=,
location_id=,
version=,
content=content)
return self._deserialize(, self._unwrap_collection(response)) | CreateGroups.
:param :class:`<object> <azure.devops.v5_0.identity.models.object>` container:
:rtype: [Identity] |
5,504 | def get_post_alter_table_index_foreign_key_sql(self, diff):
if not isinstance(diff.from_table, Table):
raise DBALException(
"Sqlite platform requires for alter table the table"
"diff with reference to original table schema"
)
sql = []
if diff.new_name:
table_name = diff.get_new_name()
else:
table_name = diff.get_name(self)
for index in self._get_indexes_in_altered_table(diff).values():
if index.is_primary():
continue
sql.append(
self.get_create_index_sql(index, table_name.get_quoted_name(self))
)
return sql | :param diff: The table diff
:type diff: orator.dbal.table_diff.TableDiff
:rtype: list |
5,505 | def _scan_block(self, cfg_job):
addr = cfg_job.addr
current_func_addr = cfg_job.func_addr
if addr in self._function_addresses_from_symbols:
current_func_addr = addr
if self._addr_hooked_or_syscall(addr):
entries = self._scan_procedure(cfg_job, current_func_addr)
else:
entries = self._scan_irsb(cfg_job, current_func_addr)
return entries | Scan a basic block starting at a specific address
:param CFGJob cfg_job: The CFGJob instance.
:return: a list of successors
:rtype: list |
5,506 | def mail_logger(app, level = None):
credentials = None
if app.config[] and app.config[]:
credentials = (app.config[], app.config[])
secure = None
if app.config[]:
secure = tuple()
config = dict(
mailhost=(app.config[], app.config[]),
fromaddr=app.config[],
toaddrs=app.config[],
credentials = credentials,
subject=,
secure = secure,
timeout=1.0
)
mail_handler = SMTPHandler(**config)
if level is None: level = logging.ERROR
mail_handler.setLevel(level)
mail_log_format =
mail_handler.setFormatter(logging.Formatter(mail_log_format))
return mail_handler | Get mail logger
Returns configured instance of mail logger ready to be attached to app.
Important: app.config['DEBUG'] must be False!
:param app: application instance
:param level: mail errors of this level
:return: SMTPHandler |
5,507 | def _check_node_parameters(self, **kwargs):
if in kwargs:
kwargs[].pop(, )
kwargs[].pop(, )
if in self.__dict__:
self.__dict__[].pop(, )
self.__dict__[].pop(, )
if in kwargs:
if kwargs[] != and kwargs[] != \
:
kwargs.pop()
if in self.__dict__:
if self.__dict__[] != and self.__dict__[] \
!= :
self.__dict__.pop()
if in kwargs:
if kwargs[] != and kwargs[] != \
:
kwargs.pop()
if in self.__dict__:
if self.__dict__[] != and \
self.__dict__[] != :
self.__dict__.pop()
self.__dict__.pop(, )
self.__dict__.pop(, )
return kwargs | See discussion in issue #840. |
5,508 | def projection(radius=5e-6, sphere_index=1.339, medium_index=1.333,
wavelength=550e-9, pixel_size=1e-7, grid_size=(80, 80),
center=(39.5, 39.5)):
x = np.arange(grid_size[0]).reshape(-1, 1)
y = np.arange(grid_size[1]).reshape(1, -1)
cx, cy = center
rpx = radius / pixel_size
r = rpx**2 - (x - cx)**2 - (y - cy)**2
z = np.zeros_like(r)
rvalid = r > 0
z[rvalid] = 2 * np.sqrt(r[rvalid]) * pixel_size
phase = (sphere_index - medium_index) * 2 * np.pi * z / wavelength
meta_data = {"pixel size": pixel_size,
"wavelength": wavelength,
"medium index": medium_index,
"sim center": center,
"sim radius": radius,
"sim index": sphere_index,
"sim model": "projection",
}
qpi = qpimage.QPImage(data=phase, which_data="phase",
meta_data=meta_data)
return qpi | Optical path difference projection of a dielectric sphere
Parameters
----------
radius: float
Radius of the sphere [m]
sphere_index: float
Refractive index of the sphere
medium_index: float
Refractive index of the surrounding medium
wavelength: float
Vacuum wavelength of the imaging light [m]
pixel_size: float
Pixel size [m]
grid_size: tuple of floats
Resulting image size in x and y [px]
center: tuple of floats
Center position in image coordinates [px]
Returns
-------
qpi: qpimage.QPImage
Quantitative phase data set |
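The projection above is just the chord length through the sphere scaled by the index contrast. A minimal standalone sketch of that math (hypothetical grid and sphere parameters, no qpimage dependency) might look like:

    import numpy as np

    # Assumed values: 80x80 grid, 5 um sphere centred in the image
    radius, pixel_size, wavelength = 5e-6, 1e-7, 550e-9
    sphere_index, medium_index = 1.339, 1.333
    x = np.arange(80).reshape(-1, 1)
    y = np.arange(80).reshape(1, -1)
    cx = cy = 39.5

    rpx = radius / pixel_size                      # sphere radius in pixels
    r = rpx**2 - (x - cx)**2 - (y - cy)**2         # squared half-chord length [px^2]
    z = np.zeros_like(r)
    z[r > 0] = 2 * np.sqrt(r[r > 0]) * pixel_size  # path length through the sphere [m]
    phase = (sphere_index - medium_index) * 2 * np.pi * z / wavelength
    print(phase.max())                             # peak phase delay at the sphere centre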
5,509 | def _strip_footnote_definitions(self, text):
less_than_tab = self.tab_width - 1
footnote_def_re = re.compile(r % (less_than_tab, self.tab_width, self.tab_width),
re.X | re.M)
return footnote_def_re.sub(self._extract_footnote_def_sub, text) | A footnote definition looks like this:
[^note-id]: Text of the note.
May include one or more indented paragraphs.
Where,
- The 'note-id' can be pretty much anything, though typically it
is the number of the footnote.
- The first paragraph may start on the next line, like so:
[^note-id]:
Text of the note. |
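The footnote-definition regex itself was lost in extraction; purely as an illustration (not the original markdown2 pattern), a regex for the documented `[^note-id]:` shape could be:

    import re

    # Illustrative pattern only: match lines such as "[^note-id]: Text of the note."
    footnote_def_re = re.compile(r'^[ ]{0,3}\[\^(.+?)\]:[ \t]*(.*)$', re.M)

    text = "Some prose.\n[^1]: Text of the note.\n"
    print(footnote_def_re.findall(text))  # [('1', 'Text of the note.')]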
5,510 | def _get_self_bounds(self):
if not self.embed:
raise ValueError()
data = json.loads(self.data)
if not in data.keys():
if not (isinstance(data, dict) and in data.keys()):
data = {: , : data}
data = {: , : [data]}
bounds = [[None, None], [None, None]]
for feature in data[]:
for point in iter_points(feature.get(, {}).get(, {})):
bounds = [
[
none_min(bounds[0][0], point[1]),
none_min(bounds[0][1], point[0]),
],
[
none_max(bounds[1][0], point[1]),
none_max(bounds[1][1], point[0]),
],
]
return bounds | Computes the bounds of the object itself (not including its children)
in the form [[lat_min, lon_min], [lat_max, lon_max]]. |
5,511 | def decrease_posts_count_after_post_unaproval(sender, instance, **kwargs):
if not instance.pk:
return
profile, dummy = ForumProfile.objects.get_or_create(user=instance.poster)
try:
old_instance = instance.__class__._default_manager.get(pk=instance.pk)
except ObjectDoesNotExist:
return
if old_instance and old_instance.approved is True and instance.approved is False:
profile.posts_count = F() - 1
profile.save() | Decreases the member's post count after a post unapproval.
This receiver handles the unapproval of a forum post: the posts count associated with the post's
author is decreased. |
5,512 | def _GetDirectory(self):
if self.entry_type != definitions.FILE_ENTRY_TYPE_DIRECTORY:
return None
return LVMDirectory(self._file_system, self.path_spec) | Retrieves the directory.
Returns:
LVMDirectory: a directory or None if not available. |
5,513 | def get_cities_by_name(self, name):
if name not in self.cities_by_names:
if self.cities_items is None:
self.cities_items = list(self.get_cities().items())
self.cities_by_names[name] = [dict({gid: city})
for gid, city in self.cities_items if city[] == name]
return self.cities_by_names[name] | Get a list of city dictionaries with the given name.
City names cannot be used as keys, as they are not unique. |
5,514 | def remove(self, *args):
args = self.prepare_args(args)
for index in self._indexes:
index.remove(*args) | Remove the instance tied to the field from all the indexes
For the parameters, see BaseIndex.remove |
5,515 | def estimateBIsochrone(pot,R,z,phi=None):
if pot is None:
raise IOError("pot= needs to be set to a Potential instance or list thereof")
if isinstance(R,nu.ndarray):
if phi is None: phi= [None for r in R]
bs= nu.array([estimateBIsochrone(pot,R[ii],z[ii],phi=phi[ii],
use_physical=False)
for ii in range(len(R))])
return nu.array([nu.amin(bs[True^nu.isnan(bs)]),
nu.median(bs[True^nu.isnan(bs)]),
nu.amax(bs[True^nu.isnan(bs)])])
else:
r2= R**2.+z**2
r= math.sqrt(r2)
dlvcdlr= dvcircdR(pot,r,phi=phi,use_physical=False)/vcirc(pot,r,phi=phi,use_physical=False)*r
try:
b= optimize.brentq(lambda x: dlvcdlr-(x/math.sqrt(r2+x**2.)-0.5*r2/(r2+x**2.)),
0.01,100.)
except:
b= nu.nan
return b | NAME:
estimateBIsochrone
PURPOSE:
Estimate a good value for the scale of the isochrone potential by matching the slope of the rotation curve
INPUT:
pot- Potential instance or list thereof
R,z - coordinates (if these are arrays, the median estimated delta is returned, i.e., if this is an orbit)
phi= (None) azimuth to use for non-axisymmetric potentials (array if R and z are arrays)
OUTPUT:
b if 1 R,Z given
bmin,bmedian,bmax if multiple R given
HISTORY:
2013-09-12 - Written - Bovy (IAS)
2016-02-20 - Changed input order to allow physical conversions - Bovy (UofT)
2016-06-28 - Added phi= keyword for non-axisymmetric potential - Bovy (UofT) |
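Under the hood this is a one-dimensional root find: scipy's brentq searches for the isochrone scale b whose predicted d ln v_c / d ln r matches the measured slope. A minimal brentq usage sketch with a toy equation (not galpy-specific):

    from scipy import optimize

    # Solve f(x) = 0 on [0, 2] where f(x) = x**2 - 2; the bracket must change sign.
    root = optimize.brentq(lambda x: x**2 - 2.0, 0.0, 2.0)
    print(root)  # ~1.41421, i.e. sqrt(2)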
5,516 | def _safe_sparse_mask(tensor: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
try:
return tensor.sparse_mask(mask)
except AttributeError:
return tensor._sparse_mask(mask) | In PyTorch 1.0, Tensor._sparse_mask was changed to Tensor.sparse_mask.
This wrapper allows AllenNLP to (temporarily) work with both 1.0 and 0.4.1. |
5,517 | def get_small_molecule_name(self, hms_lincs_id):
entry = self._get_entry_by_id(self._sm_data, hms_lincs_id)
if not entry:
return None
name = entry[]
return name | Get the name of a small molecule from the LINCS sm metadata.
Parameters
----------
hms_lincs_id : str
The HMS LINCS ID of the small molecule.
Returns
-------
str
The name of the small molecule. |
5,518 | def _normalize(x, cmin=None, cmax=None, clip=True):
if not isinstance(x, np.ndarray):
x = np.array(x)
if cmin is None:
cmin = x.min()
if cmax is None:
cmax = x.max()
if cmin == cmax:
return .5 * np.ones(x.shape)
else:
cmin, cmax = float(cmin), float(cmax)
y = (x - cmin) * 1. / (cmax - cmin)
if clip:
y = np.clip(y, 0., 1.)
return y | Normalize an array from the range [cmin, cmax] to [0,1],
with optional clipping. |
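A self-contained sketch of the same min-max rescale with optional clipping (numpy only, names are illustrative):

    import numpy as np

    def normalize(x, cmin=None, cmax=None, clip=True):
        """Rescale ``x`` from [cmin, cmax] to [0, 1], optionally clipping."""
        x = np.asarray(x, dtype=float)
        cmin = x.min() if cmin is None else float(cmin)
        cmax = x.max() if cmax is None else float(cmax)
        if cmin == cmax:                 # degenerate range: return mid-grey
            return 0.5 * np.ones(x.shape)
        y = (x - cmin) / (cmax - cmin)
        return np.clip(y, 0.0, 1.0) if clip else y

    print(normalize([0, 5, 10], cmax=8))  # [0.    0.625 1.   ] after clipping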
5,519 | def get_connectable_volume_templates(self, start=0, count=-1, filter=, query=, sort=):
uri = self.URI + "/connectable-volume-templates"
get_uri = self._client.build_query_uri(start=start, count=count, filter=filter,
query=query, sort=sort, uri=uri)
return self._client.get(get_uri) | Gets the storage volume templates that are available on the specified networks based on the storage system
port's expected network connectivity. If there are no storage volume templates that meet the specified
connectivity criteria, an empty collection will be returned.
Returns:
list: Storage volume templates. |
5,520 | def add_uuid(dom, uuid):
mods_tag = get_mods_tag(dom)
uuid_tag = dhtmlparser.HTMLElement(
"mods:identifier",
{"type": "uuid"},
[dhtmlparser.HTMLElement(uuid)]
)
insert_tag(uuid_tag, dom.find("mods:identifier"), mods_tag) | Add ``<mods:identifier>`` with `uuid`. |
5,521 | def simplified_pos(pos, tagset=None):
if tagset == :
if pos.startswith() or pos.startswith():
return pos[0]
elif pos.startswith():
return
elif pos.startswith():
return
else:
return None
else:
if pos.startswith() or pos.startswith():
return pos[0]
elif pos.startswith() or pos.startswith():
return pos[:3]
else:
return None | Return a simplified POS tag for a full POS tag `pos` belonging to a tagset `tagset`. By default the WordNet
tagset is assumed.
Does the following conversion by default:
- all N... (noun) tags to 'N'
- all V... (verb) tags to 'V'
- all ADJ... (adjective) tags to 'ADJ'
- all ADV... (adverb) tags to 'ADV'
- all other to None
Does the following conversion by with `tagset=='penn'`:
- all N... (noun) tags to 'N'
- all V... (verb) tags to 'V'
- all JJ... (adjective) tags to 'ADJ'
- all RB... (adverb) tags to 'ADV'
- all other to None |
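The string constants in the snippet were stripped during extraction, but the docstring fully specifies the mapping; a hedged reimplementation of the described behaviour:

    def simplified_pos(pos, tagset=None):
        """Collapse a full POS tag to N/V/ADJ/ADV as described above (sketch)."""
        if tagset == 'penn':
            if pos.startswith('N') or pos.startswith('V'):
                return pos[0]
            elif pos.startswith('JJ'):
                return 'ADJ'
            elif pos.startswith('RB'):
                return 'ADV'
            return None
        # default: WordNet-style tags
        if pos.startswith('N') or pos.startswith('V'):
            return pos[0]
        elif pos.startswith('ADJ') or pos.startswith('ADV'):
            return pos[:3]
        return None

    print(simplified_pos('NNS'), simplified_pos('JJR', tagset='penn'))  # N ADJ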
5,522 | def get_response_data(self, response, parse_json=True):
if response.status_code in (requests.codes.ok, requests.codes.created):
if parse_json:
return response.json()
return response.content
elif response.status_code == requests.codes.bad_request:
response_json = response.json()
raise BadRequestException(response_json.get("error", False) or response_json.get("errors",
_("Bad Request: {text}").format(text=response.text)))
elif response.status_code == requests.codes.not_found:
raise NotFoundException(_("Resource not found: {url}").format(url=response.url))
elif response.status_code == requests.codes.internal_server_error:
raise ServerErrorException(_("Internal server error"))
elif response.status_code in (requests.codes.unauthorized, requests.codes.forbidden):
raise AuthErrorException(_("Access denied"))
elif response.status_code == requests.codes.too_many_requests:
raise RateLimitException(_(response.text))
else:
raise ServerErrorException(_("Unknown error occurred")) | Get response data or throw an appropriate exception
:param response: requests response object
:param parse_json: if True, response will be parsed as JSON
:return: response data, either as json or as a regular response.content object |
5,523 | def samples_to_records(samples, default_keys=None):
from bcbio.pipeline import run_info
RECORD_CONVERT_TO_LIST = set(["config__algorithm__tools_on", "config__algorithm__tools_off",
"reference__genome_context"])
all_keys = _get_all_cwlkeys(samples, default_keys)
out = []
for data in samples:
for raw_key in sorted(list(all_keys)):
key = raw_key.split("__")
if tz.get_in(key, data) is None:
data = tz.update_in(data, key, lambda x: None)
if raw_key not in data["cwl_keys"]:
data["cwl_keys"].append(raw_key)
if raw_key in RECORD_CONVERT_TO_LIST:
val = tz.get_in(key, data)
if not val: val = []
elif not isinstance(val, (list, tuple)): val = [val]
data = tz.update_in(data, key, lambda x: val)
if isinstance(tz.get_in(key, data), bool):
data = tz.update_in(data, key, lambda x: str(tz.get_in(key, data)))
data["metadata"] = run_info.add_metadata_defaults(data.get("metadata", {}))
out.append(data)
return out | Convert samples into output CWL records. |
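The record conversion leans on toolz's get_in/update_in helpers for nested-dictionary access; a small illustration of those two calls on a made-up sample dict:

    import toolz as tz

    data = {"config": {"algorithm": {"tools_on": "vqsr"}}}
    key = ["config", "algorithm", "tools_on"]

    print(tz.get_in(key, data))                    # 'vqsr'
    data = tz.update_in(data, key, lambda v: [v])  # wrap the value in a list
    print(tz.get_in(key, data))                    # ['vqsr']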
5,524 | def p_single_line_if(p):
cond_ = p[1]
stat_ = p[2]
p[0] = make_sentence(, cond_, stat_, lineno=p.lineno(1)) | if_inline : if_then_part statements %prec ID
| if_then_part co_statements_co %prec NEWLINE
| if_then_part statements_co %prec NEWLINE
| if_then_part co_statements %prec ID |
5,525 | def DeactivateCard(self, card):
if hasattr(card, ):
card.connection.disconnect()
if None != self.parent.apdutracerpanel:
card.connection.deleteObserver(self.parent.apdutracerpanel)
delattr(card, )
self.dialogpanel.OnDeactivateCard(card) | Deactivate a card. |
5,526 | def init_states(self,
source_encoded: mx.sym.Symbol,
source_encoded_lengths: mx.sym.Symbol,
source_encoded_max_length: int) -> List[mx.sym.Symbol]:
pass | Returns a list of symbolic states that represent the initial states of this decoder.
Used for inference.
:param source_encoded: Encoded source. Shape: (batch_size, source_encoded_max_length, encoder_depth).
:param source_encoded_lengths: Lengths of encoded source sequences. Shape: (batch_size,).
:param source_encoded_max_length: Size of encoder time dimension.
:return: List of symbolic initial states. |
5,527 | def _warning_for_deprecated_user_based_rules(rules):
for rule in rules:
if [resource for resource in USER_BASED_RESOURCES
if resource in rule[0]]:
continue
if in KEY_EXPR.findall(rule[1]):
LOG.warning(_LW("The user_id attribute isn%s'. All the user_id based policy "
"enforcement will be removed in the "
"future."), rule[0]) | Warning user based policy enforcement used in the rule but the rule
doesn't support it. |
5,528 | def restore_breakpoints_state(cls, breakpoints_state_list):
for breakpoint_state in breakpoints_state_list:
bp = cls.breakpoints_by_number[breakpoint_state[0]]
if bp:
bp.enabled = breakpoint_state[1]
bp.condition = breakpoint_state[2]
cls.update_active_breakpoint_flag()
return | Restore the state of breakpoints given a list provided by
backup_breakpoints_state(). If the list of breakpoints has changed
since the backup, missing or added breakpoints are ignored.
breakpoints_state_list is a list of tuple. Each tuple is of form:
(breakpoint_number, enabled, condition) |
5,529 | def out_of_date(self):
try:
latest_remote_sha = self.pr_commits(self.pull_request.refresh(True))[-1].sha
print("Latest remote sha: {}".format(latest_remote_sha))
try:
print("Ratelimit remaining: {}".format(self.github.ratelimit_remaining))
except Exception:
print("Failed to look up ratelimit remaining")
return self.last_sha != latest_remote_sha
except IndexError:
return False | Check if our local latest sha matches the remote latest sha |
5,530 | def add_child_resource_client(self, res_name, res_spec):
res_spec = dict(res_spec)
res_spec[] = res_name
res = self.client_resource_factory(
res_spec, parent=self, logger=self._logger)
self.children[resource.escape_name(res_name)] = res
self._children_dirty = True
res.set_ioloop(self.ioloop)
res.start()
return res | Add a resource client to the container and start the resource connection |
5,531 | def map_files(self, base_dir, all_components_list):
mapped_paths = []
for components_tupled in all_components_list:
with_base = [base_dir] + list(components_tupled)
mapped_paths.append(self.assert_single_path_by_glob(with_base))
return mapped_paths | Apply `assert_single_path_by_glob()` to all elements of `all_components_list`.
Each element of `all_components_list` should be a tuple of path components, including
wildcards. The elements of each tuple are joined, and interpreted as a glob expression relative
to `base_dir`. The resulting glob should match exactly one path.
:return: List of matched paths, one per element of `all_components_list`.
:raises: :class:`ArchiveFileMapper.ArchiveFileMappingError` if more or less than one path was
matched by one of the glob expressions interpreted from `all_components_list`. |
5,532 | def _pypsa_load_timeseries_aggregated_at_lv_station(network, timesteps):
load_p = []
load_q = []
for lv_grid in network.mv_grid.lv_grids:
load = {}
for lo in lv_grid.graph.nodes_by_attribute():
for sector, val in lo.consumption.items():
load.setdefault(sector, {})
load[sector].setdefault(, [])
load[sector].setdefault(, [])
load[sector][].append(
lo.pypsa_timeseries().rename(repr(lo)).to_frame().loc[
timesteps])
load[sector][].append(
lo.pypsa_timeseries().rename(repr(lo)).to_frame().loc[
timesteps])
for sector, val in load.items():
load_p.append(
pd.concat(val[], axis=1).sum(axis=1).rename(
.join([, sector, repr(lv_grid)])).to_frame())
load_q.append(
pd.concat(val[], axis=1).sum(axis=1).rename(
.join([, sector, repr(lv_grid)])).to_frame())
return load_p, load_q | Aggregates load time series per sector and LV grid.
Parameters
----------
network : Network
The eDisGo grid topology model overall container
timesteps : array_like
Timesteps is an array-like object with entries of type
:pandas:`pandas.Timestamp<timestamp>` specifying which time steps
to export to pypsa representation and use in power flow analysis.
Returns
-------
tuple of :pandas:`pandas.DataFrame<dataframe>`
Tuple of size two containing DataFrames that represent
1. 'p_set' of aggregated Load per sector at each LV station
2. 'q_set' of aggregated Load per sector at each LV station |
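The per-sector aggregation step reduces to concatenating the individual load series column-wise and summing across columns; a minimal pandas sketch with made-up series:

    import pandas as pd

    load_1 = pd.Series([0.4, 0.6], name="load_1")
    load_2 = pd.Series([0.1, 0.3], name="load_2")

    # Column-wise concat, then row-wise sum -> aggregated sector time series
    aggregated = pd.concat([load_1, load_2], axis=1).sum(axis=1).rename("sector_total")
    print(aggregated.to_frame())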
5,533 | def submission(self):
if not self._submission:
self._submission = self.reddit_session.get_submission(
url=self._fast_permalink)
return self._submission | Return the Submission object this comment belongs to. |
5,534 | def _reorder_types(self, types_script):
self._logger.debug()
self._logger.debug()
_type_statements = sqlparse.split(types_script)
_type_statements_dict = {}
type_unordered_scripts = []
type_drop_scripts = []
for _type_statement in _type_statements:
_type_statement_parsed = sqlparse.parse(_type_statement)
if len(_type_statement_parsed) > 0:
if _type_statement_parsed[0].get_type() == :
_type_body_r = r
_type_name = re.compile(_type_body_r, flags=re.IGNORECASE).findall(_type_statement)[0]
_type_statements_dict[str(_type_name)] = \
{: _type_statement, : []}
elif _type_statement_parsed[0].get_type() == :
type_drop_scripts.append(_type_statement)
else:
type_unordered_scripts.append(_type_statement)
_deps_unresolved = True
_type_script_order = 0
_type_names = []
type_ordered_scripts = []
while _deps_unresolved:
for k, v in _type_statements_dict.items():
if not v[]:
_type_names.append(k)
v[] = _type_script_order
_type_script_order += 1
if not v[] in type_ordered_scripts:
type_ordered_scripts.append(v[])
else:
_dep_exists = True
for _dep in v[]:
if _dep not in _type_names:
_dep_exists = False
if _dep_exists:
_type_names.append(k)
v[] = _type_script_order
_type_script_order += 1
if not v[] in type_ordered_scripts:
type_ordered_scripts.append(v[])
else:
v[] = -1
_deps_unresolved = False
for k, v in _type_statements_dict.items():
if v[] == -1:
_deps_unresolved = True
return type_drop_scripts, type_ordered_scripts, type_unordered_scripts | Takes type scripts and reorders them to avoid a "type doesn't exist" exception |
5,535 | def circumradius(self):
return (self.a * self.b * self.c) / (self.area * 4) | Distance from the circumcenter to all the vertices in
the Triangle, float. |
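The property computes R = abc / (4 * area); a quick numeric check with a 3-4-5 right triangle (area 6) gives R = 60 / 24 = 2.5, half the hypotenuse as expected:

    import math

    a, b, c = 3.0, 4.0, 5.0
    s = (a + b + c) / 2                                # semi-perimeter
    area = math.sqrt(s * (s - a) * (s - b) * (s - c))  # Heron's formula -> 6.0
    print(a * b * c / (4 * area))                      # 2.5, i.e. hypotenuse / 2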
5,536 | def plot_file_distances(dist_matrix):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.matshow(dist_matrix, interpolation=,
cmap=plt.cm.get_cmap()) | Plots dist_matrix
Parameters
----------
dist_matrix: np.ndarray |
5,537 | def run():
server_address = (args.listen_addr, args.listen_port)
httpd = YHSM_VALServer(server_address, YHSM_VALRequestHandler)
my_log_message(args, syslog.LOG_INFO, "Serving requests to (YubiHSM: )" \
% (args.listen_addr, args.listen_port, args.serve_url, args.device))
httpd.serve_forever() | Start the BaseHTTPServer and serve requests forever. |
5,538 | def browse(i):
o=i.get(,)
duoa=i.get(,)
if duoa==:
return {:1, :}
r=ck.access({:,
:work[],
:duoa})
if r[]>0: return r
p=r.get(,{}).get(,)
d=r[]
dn=r.get(,)
shared=d.get(,)
url=d.get(,)
if shared!= and url==:
return {:1, :}
import webbrowser
webbrowser.open(url)
return {:0} | Input: {
data_uoa - data UOA of the repo
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
} |
5,539 | def write(self, filename):
txt = self.tostring()
with open(filename, ) as f:
f.write(txt) | Write the XML job description to a file. |
5,540 | def _get_pq_array_construct(self):
bus_no = integer.setResultsName("bus_no")
s_rating = real.setResultsName("s_rating")
v_rating = real.setResultsName("v_rating")
p = real.setResultsName("p")
q = real.setResultsName("q")
v_max = Optional(real).setResultsName("v_max")
v_min = Optional(real).setResultsName("v_min")
z_conv = Optional(boolean).setResultsName("z_conv")
status = Optional(boolean).setResultsName("status")
pq_data = bus_no + s_rating + v_rating + p + q + v_max + \
v_min + z_conv + status + scolon
pq_data.setParseAction(self.push_pq)
pq_array = Literal("PQ.con") + "=" + "[" + "..." + \
ZeroOrMore(pq_data + Optional("]" + scolon))
return pq_array | Returns a construct for an array of PQ load data. |
5,541 | def make_headers(worksheet):
headers = {}
cell_idx = 0
while cell_idx < worksheet.ncols:
cell_type = worksheet.cell_type(0, cell_idx)
if cell_type == 1:
header = slughifi(worksheet.cell_value(0, cell_idx))
if not header.startswith("_"):
headers[cell_idx] = header
cell_idx += 1
return headers | Make headers from worksheet |
5,542 | def symbol_leading_char(self):
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(
self._ptr, BfdAttributes.SYMBOL_LEADING_CHAR) | Return the symbol leading char attribute of the BFD file being
processed. |
5,543 | def flow_coef_bd(CIJ):
N = len(CIJ)
fc = np.zeros((N,))
total_flo = np.zeros((N,))
max_flo = np.zeros((N,))
for v in range(N):
nb, = np.where(CIJ[v, :] + CIJ[:, v].T)
fc[v] = 0
if np.where(nb)[0].size:
CIJflo = -CIJ[np.ix_(nb, nb)]
for i in range(len(nb)):
for j in range(len(nb)):
if CIJ[nb[i], v] and CIJ[v, nb[j]]:
CIJflo[i, j] += 1
total_flo[v] = np.sum(
(CIJflo == 1) * np.logical_not(np.eye(len(nb))))
max_flo[v] = len(nb) * len(nb) - len(nb)
fc[v] = total_flo[v] / max_flo[v]
fc[np.isnan(fc)] = 0
FC = np.mean(fc)
return fc, FC, total_flo | Computes the flow coefficient for each node and averaged over the
network, as described in Honey et al. (2007) PNAS. The flow coefficient
is similar to betweenness centrality, but works on a local
neighborhood. It is mathematically related to the clustering
coefficient (cc) at each node as, fc+cc <= 1.
Parameters
----------
CIJ : NxN np.ndarray
binary directed connection matrix
Returns
-------
fc : Nx1 np.ndarray
flow coefficient for each node
FC : float
average flow coefficient over the network
total_flo : int
number of paths that "flow" across the central node |
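A usage sketch on a tiny made-up directed binary network (assuming flow_coef_bd is importable, e.g. from a Brain Connectivity Toolbox port):

    import numpy as np

    # Hypothetical 3-node directed cycle: 0 -> 1 -> 2 -> 0
    CIJ = np.array([[0, 1, 0],
                    [0, 0, 1],
                    [1, 0, 0]])
    fc, FC, total_flo = flow_coef_bd(CIJ)
    print(fc, FC)  # per-node flow coefficients and their network average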
5,544 | def _set_mct_state(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=mct_state.mct_state, is_container=, presence=False, yang_name="mct-state", rest_name="mct-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: None}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "container",
: ,
})
self.__mct_state = t
if hasattr(self, ):
self._set() | Setter method for mct_state, mapped from YANG variable /mct_state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_mct_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mct_state() directly.
YANG Description: MCT Operational Information |
5,545 | def attr_name(self):
"Returns attribute name for this facet"
return self.schema.name if self.schema else self.field.name | Returns attribute name for this facet |
5,546 | def join(C, *args, **kwargs):
u = C(.join([str(arg).strip() for arg in args]), **kwargs)
return u | join a list of url elements, and include any keyword arguments, as a new URL |
5,547 | def put(self, f, digest=None):
if digest:
actual_digest = digest
else:
actual_digest = self._compute_digest(f)
created = self.conn.client.blob_put(self.container_name,
actual_digest, f)
if digest:
return created
return actual_digest | Upload a blob
:param f:
File object to be uploaded (required to support seek if digest is
not provided).
:param digest:
Optional SHA-1 hex digest of the file contents. Gets computed
before actual upload if not provided, which requires an extra file
read.
:return:
The hex digest of the uploaded blob if not provided in the call.
Otherwise a boolean indicating if the blob has been newly created. |
5,548 | def parse(self, request, bundle_errors=False):
source = self.source(request)
results = []
_not_found = False
_found = True
for operator in self.operators:
name = self.name + operator.replace("=", "", 1)
if name in source:
if hasattr(source, "getlist"):
values = source.getlist(name)
else:
values = source.get(name)
if not (isinstance(values, collections.MutableSequence) and self.action == ):
values = [values]
for value in values:
if hasattr(value, "strip") and self.trim:
value = value.strip()
if hasattr(value, "lower") and not self.case_sensitive:
value = value.lower()
if hasattr(self.choices, "__iter__"):
self.choices = [choice.lower()
for choice in self.choices]
try:
value = self.convert(value, operator)
except Exception as error:
if self.ignore:
continue
return self.handle_validation_error(error, bundle_errors)
if self.choices and value not in self.choices:
if current_app.config.get("BUNDLE_ERRORS", False) or bundle_errors:
return self.handle_validation_error(
ValueError(u"{0} is not a valid choice".format(
value)), bundle_errors)
self.handle_validation_error(
ValueError(u"{0} is not a valid choice".format(
value)), bundle_errors)
if name in request.unparsed_arguments:
request.unparsed_arguments.pop(name)
results.append(value)
if not results and self.required:
if isinstance(self.location, six.string_types):
error_msg = u"Missing required parameter in {0}".format(
_friendly_location.get(self.location, self.location)
)
else:
friendly_locations = [_friendly_location.get(loc, loc)
for loc in self.location]
error_msg = u"Missing required parameter in {0}".format(
.join(friendly_locations)
)
if current_app.config.get("BUNDLE_ERRORS", False) or bundle_errors:
return self.handle_validation_error(ValueError(error_msg), bundle_errors)
self.handle_validation_error(ValueError(error_msg), bundle_errors)
if not results:
if callable(self.default):
return self.default(), _not_found
else:
return self.default, _not_found
if self.action == :
return results, _found
if self.action == or len(results) == 1:
return results[0], _found
return results, _found | Parses argument value(s) from the request, converting according to
the argument's type.
:param request: The flask request object to parse arguments from
:param bundle_errors: Do not abort when first error occurs, return a
dict with the name of the argument and the error message to be
bundled |
5,549 | def _sanitise(*args):
def _check_option(arg, value):
checked = str()
none_options = _get_options_group("none_options")
hex_options = _get_options_group("hex_options")
hex_or_none_options = _get_options_group("hex_or_none_options")
if not _util._py3k:
if not isinstance(arg, list) and isinstance(arg, unicode):
arg = str(arg)
try:
flag = _is_allowed(arg)
assert flag is not None, "_check_option(): got None for flag"
except (AssertionError, ProtectedOption) as error:
log.warn("_check_option(): %s" % str(error))
else:
checked += (flag + )
if _is_string(value):
values = value.split()
for v in values:
if (flag in none_options) and (v is None):
continue
if flag in hex_options:
if _is_hex(v): checked += (v + " ")
else:
log.debug(" not hex." % (flag, v))
if (flag in hex_or_none_options) and (v is None):
log.debug("Allowing for all keys" % flag)
continue
elif flag in []:
host = _check_keyserver(v)
if host:
log.debug("Setting keyserver: %s" % host)
checked += (v + " ")
else: log.debug("Dropping keyserver: %s" % v)
continue
val = _fix_unsafe(v)
try:
assert not val is None
assert not val.isspace()
assert not v is None
assert not v.isspace()
except:
log.debug("Dropping %s %s" % (flag, v))
continue
if flag in [, , ,
, , ]:
if ( (_util._is_file(val))
or
((flag == ) and (val == )) ):
checked += (val + " ")
else:
log.debug("%s not file: %s" % (flag, val))
elif flag in [, ,
]:
legit_algos = _check_preferences(val, )
if legit_algos: checked += (legit_algos + " ")
else: log.debug(" is not cipher" % val)
elif flag in [, ,
,
]:
legit_algos = _check_preferences(val, )
if legit_algos: checked += (legit_algos + " ")
else: log.debug(" not compress algo" % val)
elif flag == :
legit_models = _check_preferences(val, )
if legit_models: checked += (legit_models + " ")
else: log.debug("%r is not a trust model", val)
elif flag == :
legit_modes = _check_preferences(val, )
if legit_modes: checked += (legit_modes + " ")
else: log.debug("%r is not a pinentry mode", val)
else:
checked += (val + " ")
log.debug("_check_option(): No checks for %s" % val)
return checked.rstrip()
is_flag = lambda x: x.startswith()
def _make_filo(args_string):
filo = arg.split()
filo.reverse()
log.debug("_make_filo(): Converted to reverse list: %s" % filo)
return filo
def _make_groups(filo):
groups = {}
while len(filo) >= 1:
last = filo.pop()
if is_flag(last):
log.debug("Got arg: %s" % last)
if last == :
groups[last] = str(filo.pop())
if len(filo) >= 1 and filo[len(filo)-1] == :
groups[last] += str()
filo.pop()
else:
groups[last] = str()
while len(filo) > 1 and not is_flag(filo[len(filo)-1]):
log.debug("Got value: %s" % filo[len(filo)-1])
groups[last] += (filo.pop() + " ")
else:
if len(filo) == 1 and not is_flag(filo[0]):
log.debug("Got value: %s" % filo[0])
groups[last] += filo.pop()
else:
log.warn("_make_groups(): Got solitary value: %s" % last)
groups["xxx"] = last
return groups
def _check_groups(groups):
log.debug("Got groups: %s" % groups)
checked_groups = []
for a,v in groups.items():
v = None if len(v) == 0 else v
safe = _check_option(a, v)
if safe is not None and not safe.strip() == "":
log.debug("Appending option: %s" % safe)
checked_groups.append(safe)
else:
log.warn("Dropped option: " % (a,v))
return checked_groups
if args is not None:
option_groups = {}
for arg in args:
filo = _make_filo(arg)
option_groups.update(_make_groups(filo))
sanitised = ' '.join(_check_groups(option_groups))
return sanitised
else:
log.debug("Got None for args") | Take an arg or the key portion of a kwarg and check that it is in the
set of allowed GPG options and flags, and that it has the correct
type. Then, attempt to escape any unsafe characters. If an option is not
allowed, drop it with a logged warning. Returns a dictionary of all
sanitised, allowed options.
Each new option that we support that is not a boolean, but instead has
some additional inputs following it, i.e. "--encrypt-file foo.txt", will
need some basic safety checks added here.
GnuPG has three-hundred and eighteen commandline flags. Also, not all
implementations of OpenPGP parse PGP packets and headers in the same way,
so there is added potential there for messing with calls to GPG.
For information on the PGP message format specification, see
:rfc:`1991`.
If you're asking, "Is this *really* necessary?": No, not really -- we could
just follow the security precautions recommended by `this xkcd`__.
__ https://xkcd.com/1181/
:param str args: (optional) The boolean arguments which will be passed to
the GnuPG process.
:rtype: str
:returns: ``sanitised`` |
5,550 | def collect_genv(self, include_local=True, include_global=True):
e = type(self.genv)()
if include_global:
e.update(self.genv)
if include_local:
for k, v in self.lenv.items():
e[ % (self.obj.name.lower(), k)] = v
return e | Returns a copy of the global environment with all the local variables copied back into it. |
5,551 | def postcode(self):
return "%03d-%04d" % (self.generator.random.randint(0, 999),
self.generator.random.randint(0, 9999)) | :example '101-1212' |
5,552 | def kallisto_general_stats_table(self):
headers = OrderedDict()
headers[] = {
: ,
: ,
: 0,
: ,
:
}
headers[] = {
: ,
: ,
: 100,
: 0,
: ,
:
}
headers[] = {
: .format(config.read_count_prefix),
: .format(config.read_count_desc),
: 0,
: ,
: lambda x: x * config.read_count_multiplier,
:
}
self.general_stats_addcols(self.kallisto_data, headers) | Take the parsed stats from the Kallisto report and add it to the
basic stats table at the top of the report |
5,553 | def playback(cls, filename):
cls.enable()
data = json.loads(open(filename).read())
for item in data:
uri = item[][]
method = item[][]
body = item[][]
headers = item[][]
cls.register_uri(method, uri, body=body, forcing_headers=headers)
yield
cls.disable() | .. testcode::
import io
import json
import requests
import httpretty
with httpretty.record('/tmp/ip.json'):
data = requests.get('https://httpbin.org/ip').json()
with io.open('/tmp/ip.json') as fd:
assert data == json.load(fd)
:param filename: a string
:returns: a `context-manager <https://docs.python.org/3/reference/datamodel.html#context-managers>`_ |
5,554 | def merge_nodes(self, keep_node, kill_node):
for kill_link in kill_node.link_list:
if kill_link.target in self.node_list:
keep_node.add_link(kill_link.target, kill_link.weight)
for node in self.node_list:
for link in node.link_list:
if link.target == kill_node:
node.add_link(keep_node, link.weight)
break
self.remove_node(kill_node) | Merge two nodes in the graph.
Takes two nodes and merges them together, merging their links by
combining the two link lists and summing the weights of links which
point to the same node.
All links in the graph pointing to ``kill_node`` will be merged
into ``keep_node``.
Links belonging to ``kill_node`` which point to targets not in
``self.node_list`` will not be merged into ``keep_node``
Args:
keep_node (Node): node to be kept
kill_node (Node): node to be deleted
Returns: None
Example:
>>> from blur.markov.node import Node
>>> node_1 = Node('One')
>>> node_2 = Node('Two')
>>> node_3 = Node('Three')
>>> node_1.add_link(node_3, 7)
>>> node_2.add_link(node_1, 1)
>>> node_2.add_link(node_2, 3)
>>> node_3.add_link(node_2, 5)
>>> graph = Graph([node_1, node_2, node_3])
>>> print([node.value for node in graph.node_list])
['One', 'Two', 'Three']
>>> graph.merge_nodes(node_2, node_3)
>>> print([node.value for node in graph.node_list])
['One', 'Two']
>>> for link in graph.node_list[1].link_list:
... print('{} {}'.format(link.target.value, link.weight))
One 1
Two 8 |
5,555 | def get_close_matches(word, possibilities, n=None, cutoff=0.6):
if n is None:
n = settings.num_close_matches
return difflib_get_close_matches(word, possibilities, n, cutoff) | Overrides `difflib.get_close_matches` to control argument `n`. |
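For reference, the standard-library function being wrapped can be called directly:

    import difflib

    words = ["python", "jython", "cython", "ruby"]
    print(difflib.get_close_matches("pithon", words, n=1))  # ['python']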
5,556 | def search(self, query, page=None, per_page=1000, mentions=3, data=False):
if page:
document_list = self._get_search_page(
query,
page=page,
per_page=per_page,
mentions=mentions,
data=data,
)
obj_list = []
for doc in document_list:
obj = Document(doc)
obj_list.append(obj)
return obj_list | Retrieve all objects that match a search query.
Will loop through all pages that match unless you provide
the number of pages you'd like to restrict the search to.
Example usage:
>> documentcloud.documents.search('salazar') |
5,557 | def main(args=None):
if args is None:
args = sys.argv[1:]
o = Options()
try:
o.parseOptions(args)
except usage.UsageError, e:
raise SystemExit(str(e))
else:
return createSSLCertificate(o) | Create a private key and a certificate and write them to a file. |
5,558 | def is_img_id_valid(img_id):
t = re.sub(r, , img_id, re.IGNORECASE)
t = re.sub(r, , t)
if img_id != t or img_id.count() != 1:
return False
profile, base_name = img_id.split(, 1)
if not profile or not base_name:
return False
try:
get_profile_configs(profile)
except ValueError:
return False
return True | Checks if img_id is valid. |
5,559 | def update_pypsa_storage_timeseries(network, storages_to_update=None,
timesteps=None):
_update_pypsa_timeseries_by_type(
network, type=, components_to_update=storages_to_update,
timesteps=timesteps) | Updates storage time series in pypsa representation.
This function overwrites p_set and q_set of storage_unit_t attribute of
pypsa network.
Be aware that if you call this function with `timesteps` and thus overwrite
current time steps it may lead to inconsistencies in the pypsa network
since only storage time series are updated but none of the other time
series or the snapshots attribute of the pypsa network. Use the function
:func:`update_pypsa_timeseries` to change the time steps you want to
analyse in the power flow analysis.
This function will also raise an error when a storage that is currently
not in the pypsa representation is added.
Parameters
----------
network : Network
The eDisGo grid topology model overall container
storages_to_update : :obj:`list`, optional
List with all storages (of type :class:`~.grid.components.Storage`)
that need to be updated. If None all storages are updated depending on
mode. See :meth:`~.tools.pypsa_io.to_pypsa` for more information.
timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or :pandas:`pandas.Timestamp<timestamp>`
Timesteps specifies which time steps of the storage time series to
export to pypsa representation. If None all time steps currently
existing in pypsa representation are updated. If not None current time
steps are overwritten by given time steps. Default: None. |
5,560 | def freeze_matrix(script, all_layers=False):
filter_xml = .join([
,
,
% str(all_layers).lower(),
,
,
,
])
util.write_filter(script, filter_xml)
return None | Freeze the current transformation matrix into the coordinates of the
vertices of the mesh (and set this matrix to the identity).
In other words, it permanently applies the current matrix to the vertex coordinates.
Args:
script: the FilterScript object or script filename to write
the filter to.
all_layers (bool): If selected the filter will be applied to all
visible mesh layers. |
5,561 | def _expand_options(cls, options, backend=None):
current_backend = Store.current_backend
try:
backend_options = Store.options(backend=backend or current_backend)
except KeyError as e:
raise Exception( % str(e))
expanded = {}
if isinstance(options, list):
options = merge_options_to_dict(options)
for objspec, options in options.items():
objtype = objspec.split()[0]
if objtype not in backend_options:
raise ValueError(
% objtype)
obj_options = backend_options[objtype]
expanded[objspec] = {g: {} for g in obj_options.groups}
for opt, value in options.items():
found = False
valid_options = []
for g, group_opts in sorted(obj_options.groups.items()):
if opt in group_opts.allowed_keywords:
expanded[objspec][g][opt] = value
found = True
break
valid_options += group_opts.allowed_keywords
if found: continue
cls._options_error(opt, objtype, backend, valid_options)
return expanded | Validates and expands a dictionaries of options indexed by
type[.group][.label] keys into separate style, plot, norm and
output options.
opts._expand_options({'Image': dict(cmap='viridis', show_title=False)})
returns
{'Image': {'plot': dict(show_title=False), 'style': dict(cmap='viridis')}} |
5,562 | def describe_splits(self, cfName, start_token, end_token, keys_per_split):
self._seqid += 1
d = self._reqs[self._seqid] = defer.Deferred()
self.send_describe_splits(cfName, start_token, end_token, keys_per_split)
return d | experimental API for hadoop/parallel query support.
may change violently and without warning.
returns list of token strings such that first subrange is (list[0], list[1]],
next is (list[1], list[2]], etc.
Parameters:
- cfName
- start_token
- end_token
- keys_per_split |
5,563 | def export_avg_losses(ekey, dstore):
dskey = ekey[0]
oq = dstore[]
dt = oq.loss_dt()
name, value, tags = _get_data(dstore, dskey, oq.hazard_stats().items())
writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
assets = get_assets(dstore)
for tag, values in zip(tags, value.transpose(1, 0, 2)):
dest = dstore.build_fname(name, tag, )
array = numpy.zeros(len(values), dt)
for l, lt in enumerate(dt.names):
array[lt] = values[:, l]
writer.save(compose_arrays(assets, array), dest)
return writer.getsaved() | :param ekey: export key, i.e. a pair (datastore key, fmt)
:param dstore: datastore object |
5,564 | def addFreetextAnnot(self, rect, text, fontsize=12, fontname=None, color=None, rotate=0):
CheckParent(self)
val = _fitz.Page_addFreetextAnnot(self, rect, text, fontsize, fontname, color, rotate)
if not val: return
val.thisown = True
val.parent = weakref.proxy(self)
self._annot_refs[id(val)] = val
return val | Add a 'FreeText' annotation in rectangle 'rect'. |
5,565 | def effects(self, cursor=None, order=, limit=10, sse=False):
return self.horizon.account_effects(
self.address, cursor=cursor, order=order, limit=limit, sse=sse) | Retrieve the effects JSON from this instance's Horizon server.
Retrieve the effects JSON response for the account associated with
this :class:`Address`.
:param cursor: A paging token, specifying where to start returning records from.
When streaming this can be set to "now" to stream object created since your request time.
:type cursor: int, str
:param str order: The order in which to return rows, "asc" or "desc".
:param int limit: Maximum number of records to return.
:param bool sse: Use the SSE client for connecting to Horizon. |
5,566 | def create_button(self, style=Gtk.ReliefStyle.NORMAL):
btn = Gtk.Button()
btn.set_relief(style)
return btn | This is generalized method for creating Gtk.Button |
5,567 | def prepare_env(self):
try:
passwords = self.loader.load_file(, Mapping)
self.expect_passwords = {
re.compile(pattern, re.M): password
for pattern, password in iteritems(passwords)
}
except ConfigurationError:
output.debug()
self.expect_passwords = dict()
self.expect_passwords[pexpect.TIMEOUT] = None
self.expect_passwords[pexpect.EOF] = None
try:
self.env = os.environ.copy()
envvars = self.loader.load_file(, Mapping)
if envvars:
self.env.update({k:six.text_type(v) for k, v in envvars.items()})
if self.envvars and isinstance(self.envvars, dict):
self.env.update({k:six.text_type(v) for k, v in self.envvars.items()})
except ConfigurationError:
output.debug("Not loading environment vars")
self.env = os.environ.copy()
try:
self.settings = self.loader.load_file(, Mapping)
except ConfigurationError:
output.debug("Not loading settings")
self.settings = dict()
try:
self.ssh_key_data = self.loader.load_file(, string_types)
except ConfigurationError:
output.debug("Not loading ssh key")
self.ssh_key_data = None
self.idle_timeout = self.settings.get(, None)
self.job_timeout = self.settings.get(, None)
self.pexpect_timeout = self.settings.get(, 5)
self.process_isolation = self.settings.get(, self.process_isolation)
self.process_isolation_executable = self.settings.get(, self.process_isolation_executable)
self.process_isolation_path = self.settings.get(, self.process_isolation_path)
self.process_isolation_hide_paths = self.settings.get(, self.process_isolation_hide_paths)
self.process_isolation_show_paths = self.settings.get(, self.process_isolation_show_paths)
self.process_isolation_ro_paths = self.settings.get(, self.process_isolation_ro_paths)
self.pexpect_use_poll = self.settings.get(, True)
self.suppress_ansible_output = self.settings.get(, self.quiet)
self.directory_isolation_cleanup = bool(self.settings.get(, True))
if in self.env or not os.path.exists(self.project_dir):
self.cwd = self.private_data_dir
else:
if self.directory_isolation_path is not None:
self.cwd = self.directory_isolation_path
else:
self.cwd = self.project_dir
if in self.settings:
if in self.settings:
if self.settings[] == :
self.fact_cache = os.path.join(self.artifact_dir, self.settings[])
else:
self.fact_cache = os.path.join(self.artifact_dir, self.settings[]) | Manages reading environment metadata files under ``private_data_dir`` and merging/updating
with existing values so the :py:class:`ansible_runner.runner.Runner` object can read and use them easily |
5,568 | def generate_reciprocal_vectors_squared(a1, a2, a3, encut):
for vec in genrecip(a1, a2, a3, encut):
yield np.dot(vec, vec) | Generate reciprocal vector magnitudes within the cutoff along the specified
lattice vectors.
Args:
a1: Lattice vector a (in Bohrs)
a2: Lattice vector b (in Bohrs)
a3: Lattice vector c (in Bohrs)
encut: Reciprocal vector energy cutoff
Returns:
[[g1^2], [g2^2], ...] Square of reciprocal vectors (1/Bohr)^2
determined by a1, a2, a3 and whose magnitude is less than gcut^2. |
5,569 | def check_uid_validity(key, email):
def check(key_uid):
return (email == key_uid.email and
not key_uid.revoked and
not key_uid.invalid and
key_uid.validity >= gpg.constants.validity.FULL)
return any(check(u) for u in key.uids) | Check that the email belongs to the given key. Also check the trust
level of this connection. Only if the trust level is high enough (>=4) the
email is assumed to belong to the key.
:param key: the GPG key to which the email should belong
:type key: gpg.gpgme._gpgme_key
:param email: the email address that should belong to the key
:type email: str
:returns: whether the key can be assumed to belong to the given email
:rtype: bool |
5,570 | def block_lengths(self):
if self._lengths_cache is None:
self._lengths_cache = np.array(
[obj.length().get() for obj in self._partitions_cache.T[0]]
if len(self._partitions_cache.T) > 0
else []
)
return self._lengths_cache | Gets the lengths of the blocks.
Note: This works with the property structure `_lengths_cache` to avoid
having to recompute these values each time they are needed. |
5,571 | def get_main_chain_layers(self):
main_chain = self.get_main_chain()
ret = []
for u in main_chain:
for v, layer_id in self.adj_list[u]:
if v in main_chain and u in main_chain:
ret.append(layer_id)
return ret | Return a list of layer IDs in the main chain. |
5,572 | def format_query_result(self, query_result, query_path, return_type=list, preceding_depth=None):
if type(query_result) != return_type:
converted_result = self.format_with_handler(query_result, return_type)
else:
converted_result = query_result
converted_result = self.add_preceding_dict(converted_result, query_path, preceding_depth)
return converted_result | Formats the query result based on the return type requested.
:param query_result: (dict or str or list), yaml query result
:param query_path: (str, list(str)), representing query path
:param return_type: type, return type of object user desires
:param preceding_depth: int, the depth to which we want to encapsulate back up config tree
-1 : defaults to entire tree
:return: (dict, OrderedDict, str, list), specified return type |
5,573 | def sort(self, func=None):
if func:
self.data.sort(func)
else:
self.data.sort() | Sorts 'self.data' in-place. Argument:
- func : optional, default 'None' --
- If 'func' not given, sorting will be in ascending
order.
- If 'func' given, it will determine the sort order.
'func' must be a two-argument comparison function
which returns -1, 0, or 1, to mean before, same,
or after ordering. |
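The two-argument comparison style described here is Python 2's list.sort(cmp); under Python 3 the same ordering would typically be expressed with functools.cmp_to_key, for example:

    from functools import cmp_to_key

    def by_length_then_alpha(a, b):
        """Return -1/0/1: shorter strings first, ties broken alphabetically."""
        if len(a) != len(b):
            return -1 if len(a) < len(b) else 1
        return -1 if a < b else (1 if a > b else 0)

    data = ["pear", "fig", "apple", "kiwi"]
    data.sort(key=cmp_to_key(by_length_then_alpha))
    print(data)  # ['fig', 'kiwi', 'pear', 'apple']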
5,574 | def dist_abs(
self,
src,
tar,
metric=,
cost=(1, 1, 0.5, 0.5),
layout=,
):
ins_cost, del_cost, sub_cost, shift_cost = cost
if src == tar:
return 0.0
if not src:
return len(tar) * ins_cost
if not tar:
return len(src) * del_cost
keyboard = self._keyboard[layout]
lowercase = {item for sublist in keyboard[0] for item in sublist}
uppercase = {item for sublist in keyboard[1] for item in sublist}
def _kb_array_for_char(char):
if char in lowercase:
return keyboard[0]
elif char in uppercase:
return keyboard[1]
raise ValueError(char + )
def _substitution_cost(char1, char2):
cost = sub_cost
cost *= metric_dict[metric](char1, char2) + shift_cost * (
_kb_array_for_char(char1) != _kb_array_for_char(char2)
)
return cost
def _get_char_coord(char, kb_array):
for row in kb_array:
if char in row:
return kb_array.index(row), row.index(char)
def _euclidean_keyboard_distance(char1, char2):
row1, col1 = _get_char_coord(char1, _kb_array_for_char(char1))
row2, col2 = _get_char_coord(char2, _kb_array_for_char(char2))
return ((row1 - row2) ** 2 + (col1 - col2) ** 2) ** 0.5
def _manhattan_keyboard_distance(char1, char2):
row1, col1 = _get_char_coord(char1, _kb_array_for_char(char1))
row2, col2 = _get_char_coord(char2, _kb_array_for_char(char2))
return abs(row1 - row2) + abs(col1 - col2)
def _log_euclidean_keyboard_distance(char1, char2):
return log(1 + _euclidean_keyboard_distance(char1, char2))
def _log_manhattan_keyboard_distance(char1, char2):
return log(1 + _manhattan_keyboard_distance(char1, char2))
metric_dict = {
: _euclidean_keyboard_distance,
: _manhattan_keyboard_distance,
: _log_euclidean_keyboard_distance,
: _log_manhattan_keyboard_distance,
}
d_mat = np_zeros((len(src) + 1, len(tar) + 1), dtype=np_float32)
for i in range(len(src) + 1):
d_mat[i, 0] = i * del_cost
for j in range(len(tar) + 1):
d_mat[0, j] = j * ins_cost
for i in range(len(src)):
for j in range(len(tar)):
d_mat[i + 1, j + 1] = min(
d_mat[i + 1, j] + ins_cost,
d_mat[i, j + 1] + del_cost,
d_mat[i, j]
+ (
_substitution_cost(src[i], tar[j])
if src[i] != tar[j]
else 0
),
)
return d_mat[len(src), len(tar)] | Return the typo distance between two strings.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
metric : str
Supported values include: ``euclidean``, ``manhattan``,
``log-euclidean``, and ``log-manhattan``
cost : tuple
A 4-tuple representing the cost of the four possible edits:
inserts, deletes, substitutions, and shift, respectively (by
default: (1, 1, 0.5, 0.5)) The substitution & shift costs should be
significantly less than the cost of an insertion & deletion unless
a log metric is used.
layout : str
Name of the keyboard layout to use (Currently supported:
``QWERTY``, ``Dvorak``, ``AZERTY``, ``QWERTZ``)
Returns
-------
float
Typo distance
Raises
------
ValueError
char not found in any keyboard layouts
Examples
--------
>>> cmp = Typo()
>>> cmp.dist_abs('cat', 'hat')
1.5811388
>>> cmp.dist_abs('Niall', 'Neil')
2.8251407
>>> cmp.dist_abs('Colin', 'Cuilen')
3.4142137
>>> cmp.dist_abs('ATCG', 'TAGC')
2.5
>>> cmp.dist_abs('cat', 'hat', metric='manhattan')
2.0
>>> cmp.dist_abs('Niall', 'Neil', metric='manhattan')
3.0
>>> cmp.dist_abs('Colin', 'Cuilen', metric='manhattan')
3.5
>>> cmp.dist_abs('ATCG', 'TAGC', metric='manhattan')
2.5
>>> cmp.dist_abs('cat', 'hat', metric='log-manhattan')
0.804719
>>> cmp.dist_abs('Niall', 'Neil', metric='log-manhattan')
2.2424533
>>> cmp.dist_abs('Colin', 'Cuilen', metric='log-manhattan')
2.2424533
>>> cmp.dist_abs('ATCG', 'TAGC', metric='log-manhattan')
2.3465736 |
5,575 | def makeBasicSolution(self,EndOfPrdvP,aNrm,interpolator):
s consumption-saving problem, with a
consumption function, marginal value function, and minimum m.
'
cNrm,mNrm = self.getPointsForInterpolation(EndOfPrdvP,aNrm)
solution_now = self.usePointsForInterpolation(cNrm,mNrm,interpolator)
return solution_now | Given end of period assets and end of period marginal value, construct
the basic solution for this period.
Parameters
----------
EndOfPrdvP : np.array
Array of end-of-period marginal values.
aNrm : np.array
Array of end-of-period asset values that yield the marginal values
in EndOfPrdvP.
interpolator : function
A function that constructs and returns a consumption function.
Returns
-------
solution_now : ConsumerSolution
The solution to this period's consumption-saving problem, with a
consumption function, marginal value function, and minimum m. |
5,576 | def compute_between_collection_interval_duration(self, prefix):
durations = []
for collection in self.collection_list:
start = collection[0].start_time
end = collection[-1].end_time
durations.append((start, end))
interstices = [durations[i + 1][0] - durations[i][1] for i, d in enumerate(durations[:-1])]
for i, entry in enumerate(interstices):
if interstices[i] < 0:
interstices[i] = 0
self.measures[prefix + ] = get_mean(interstices) \
if len(interstices) > 0 else
if not self.quiet:
print
print self.current_similarity_measure + " between-" + self.current_collection_type + " durations"
table = [(self.current_collection_type + " 1 (start,end)", "Interval",
self.current_collection_type + " 2 (start,end)")] + \
[(str(d1), str(i1), str(d2)) for d1, i1, d2 in zip(durations[:-1], interstices, durations[1:])]
print_table(table)
print
print "Mean " + self.current_similarity_measure + " between-" + self.current_collection_type + " duration", \
self.measures[prefix + ] | Calculates BETWEEN-collection intervals for the current collection and measure type
and takes their mean.
:param str prefix: Prefix for the key entry in self.measures.
Negative intervals (for overlapping clusters) are counted as 0 seconds. Intervals are
calculated as being the difference between the ending time of the last word in a collection
and the start time of the first word in the subsequent collection.
Note that these intervals are not necessarily silences, and may include asides, filled
pauses, words from the examiner, etc.
Adds the following measures to the self.measures dictionary:
- TIMING_(similarity_measure)_(collection_type)_between_collection_interval_duration_mean:
average interval duration separating clusters |
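The interval logic is simply "start of the next collection minus end of the previous one, floored at zero"; a standalone sketch with made-up times (statistics.mean standing in for the module's get_mean helper):

    from statistics import mean

    # (start, end) times of consecutive collections, in seconds (hypothetical)
    durations = [(0.0, 1.2), (1.5, 2.0), (1.9, 3.1)]

    gaps = [max(0.0, nxt[0] - prev[1]) for prev, nxt in zip(durations, durations[1:])]
    print(gaps)        # [0.3, 0.0] -- overlapping collections count as 0
    print(mean(gaps))  # 0.15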
5,577 | def query(cls, url=urljoin(config.API_URL, ), **kwargs):
logger.debug( % (url, json.dumps(kwargs)))
response = requests.post(url, data=json.dumps(kwargs))
if response.status_code != 200:
raise SatSearchError(response.text)
return response.json() | Get request |
5,578 | def user_roles_exists(name, roles, database, user=None, password=None, host=None,
port=None, authdb=None):
*["readWrite"]*[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]
try:
roles = _to_dict(roles)
except Exception:
return
users = user_list(user, password, host, port, database, authdb)
if isinstance(users, six.string_types):
return
for user in users:
if name == dict(user).get():
for role in roles:
if not isinstance(role, dict):
role = {: role, : database}
if role not in dict(user).get(, []):
return False
return True
return False | Checks if a user of a MongoDB database has specified roles
CLI Examples:
.. code-block:: bash
salt '*' mongodb.user_roles_exists johndoe '["readWrite"]' dbname admin adminpwd localhost 27017
.. code-block:: bash
salt '*' mongodb.user_roles_exists johndoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 |
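The role comparison above hinges on normalising the short form to the long form before membership testing; a small self-contained sketch of that step (no MongoDB connection required):

def normalize_roles(roles, database):
    # "readWrite" -> {"role": "readWrite", "db": database}; dicts pass through unchanged
    return [r if isinstance(r, dict) else {"role": r, "db": database} for r in roles]

normalize_roles(["readWrite"], "dbname")
# [{'role': 'readWrite', 'db': 'dbname'}]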
5,579 | def set_public_domain(self, public_domain=None):
if public_domain is None:
raise NullArgument()
metadata = Metadata(**settings.METADATA[])
if metadata.is_read_only():
raise NoAccess()
if self._is_valid_input(public_domain, metadata, array=False):
self._my_map[] = public_domain
else:
raise InvalidArgument() | Sets the public domain flag.
:param public_domain: the public domain status
:type public_domain: ``boolean``
:raise: ``NoAccess`` -- ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.* |
5,580 | def flatten_all(nested_iterable):
for item in nested_iterable:
if hasattr(item, "__iter__") and not isinstance(item, string_types):
for i in flatten_all(item):
yield i
else:
yield item | Flatten arbitrary depth of nesting. Good for iterable objects with an
unknown nesting structure.
Example::
>>> list(flatten_all([[1, 2], "abc", [3, ["x", "y", "z"]], 4]))
[1, 2, "abc", 3, "x", "y", "z", 4]
**Notes**
Flattens a list of arbitrary nesting depth into a one-dimensional list.
Note: hasattr(i, "__iter__") is used to decide whether an object is iterable because it
outperforms any alternative, e.g. isinstance(i, collections.Iterable).
5,581 | def nailgunned_stdio(cls, sock, env, handle_stdin=True):
stdin_isatty, stdout_isatty, stderr_isatty = NailgunProtocol.isatty_from_env(env)
is_tty_capable = all((stdin_isatty, stdout_isatty, stderr_isatty))
if is_tty_capable:
with cls._tty_stdio(env) as finalizer:
yield finalizer
else:
with cls._pipe_stdio(
sock,
stdin_isatty,
stdout_isatty,
stderr_isatty,
handle_stdin
) as finalizer:
yield finalizer | Redirects stdio to the connected socket speaking the nailgun protocol. |
5,582 | def submit(recaptcha_response, private_key, remoteip):
params = urlencode({
"secret": private_key,
"response": recaptcha_response,
"remoteip": remoteip,
})
if not PY2:
params = params.encode("utf-8")
response = recaptcha_request(params)
data = json.loads(response.read().decode("utf-8"))
response.close()
return RecaptchaResponse(
is_valid=data["success"],
error_codes=data.get("error-codes")
) | Submits a reCAPTCHA request for verification. Returns RecaptchaResponse
for the request
recaptcha_response -- The value of reCAPTCHA response from the form
private_key -- your reCAPTCHA private key
remoteip -- the user's ip address |
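A hedged usage sketch of how a form handler might call submit(); form_data, request_ip, save_submission, log and the secret key are placeholders, not values from the source:

result = submit(
    recaptcha_response=form_data.get("g-recaptcha-response", ""),
    private_key="YOUR_RECAPTCHA_SECRET",   # placeholder secret
    remoteip=request_ip,
)
if result.is_valid:
    save_submission(form_data)             # hypothetical success path
else:
    log.warning("reCAPTCHA failed: %s", result.error_codes)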
5,583 | def encode(self):
return .format(
self.a, self.b, self.c, self.d, self.e, self.f
).encode() | Encode this matrix in binary suitable for including in a PDF |
5,584 | def el_is_empty(el):
if len(el) == 1 and not isinstance(el[0], (list, tuple)):
return True
subels_are_empty = []
for subel in el:
if isinstance(subel, (list, tuple)):
subels_are_empty.append(el_is_empty(subel))
else:
subels_are_empty.append(not bool(subel))
return all(subels_are_empty) | Return ``True`` if tuple ``el`` represents an empty XML element. |
5,585 | def _connect(self):
if self._connParams:
self._conn = MySQLdb.connect(**self._connParams)
else:
self._conn = MySQLdb.connect() | Establish connection to MySQL Database. |
5,586 | def construct_pipeline_block_lambda(env=,
generated=None,
previous_env=None,
region=,
region_subnets=None,
settings=None,
pipeline_data=None):
LOG.info(, env, region)
if env.startswith():
template_name = .format(env)
else:
template_name =
LOG.debug(, env, pformat(settings))
gen_app_name = generated.app_name()
user_data = generate_encoded_user_data(
env=env,
region=region,
generated=generated,
group_name=generated.project,
)
instance_security_groups = sorted(DEFAULT_EC2_SECURITYGROUPS[env])
instance_security_groups.append(gen_app_name)
instance_security_groups.extend(settings[][])
instance_security_groups = remove_duplicate_sg(instance_security_groups)
LOG.info(, instance_security_groups)
data = copy.deepcopy(settings)
data[].update({
: gen_app_name,
: generated.repo,
: generated.project,
: env,
: region,
: json.dumps(region_subnets),
: previous_env,
: user_data,
: json.dumps(instance_security_groups),
: pipeline_data[],
: pipeline_data[],
: pipeline_data[][]
})
LOG.debug(, pformat(data))
pipeline_json = get_template(template_file=template_name, data=data, formats=generated)
return pipeline_json | Create the Pipeline JSON from template.
This handles the common repeatable patterns in a pipeline, such as
judgement, infrastructure, tagger and qe.
Args:
env (str): Deploy environment name, e.g. dev, stage, prod.
generated (gogoutils.Generator): Gogo Application name generator.
previous_env (str): The previous deploy environment to use as
Trigger.
region (str): AWS Region to deploy to.
settings (dict): Environment settings from configurations.
region_subnets (dict): Subnets for a Region, e.g.
{'us-west-2': ['us-west-2a', 'us-west-2b', 'us-west-2c']}.
Returns:
dict: Pipeline JSON template rendered with configurations. |
5,587 | def compute_svd(X, n_components, n_iter, random_state, engine):
if engine == :
engine =
if engine == :
if FBPCA_INSTALLED:
U, s, V = fbpca.pca(X, k=n_components, n_iter=n_iter)
else:
raise ValueError()
elif engine == :
U, s, V = extmath.randomized_svd(
X,
n_components=n_components,
n_iter=n_iter,
random_state=random_state
)
else:
raise ValueError("engine has to be one of (, , )")
U, V = extmath.svd_flip(U, V)
return U, s, V | Computes an SVD with k components. |
5,588 | def get_include_path():
f1 = os.path.basename(sys.argv[0]).lower()
f2 = os.path.basename(sys.executable).lower()
if f1 == f2 or f2 == f1 + :
result = os.path.dirname(os.path.realpath(sys.executable))
else:
result = os.path.dirname(os.path.realpath(__file__))
return result | Default include path using a tricky sys
calls. |
5,589 | def export_obj_str(surface, **kwargs):
vertex_spacing = int(kwargs.get('vertex_spacing', 1))  # keyword names as documented below
include_vertex_normal = kwargs.get('vertex_normals', False)
include_param_vertex = kwargs.get('parametric_vertices', False)
update_delta = kwargs.get('update_delta', True)
if surface.pdimension != 2:
raise exch.GeomdlException("Can only export surfaces")
if vertex_spacing < 1:
raise exch.GeomdlException("Vertex spacing should be bigger than zero")
line = "
vertex_offset = 0
str_v = []
str_vn = []
str_vp = []
str_f = []
for srf in surface:
if update_delta:
srf.sample_size_u = surface.sample_size_u
srf.sample_size_v = surface.sample_size_v
srf.tessellate(vertex_spacing=vertex_spacing)
vertices = srf.tessellator.vertices
triangles = srf.tessellator.faces
for vert in vertices:
temp = "v " + str(vert.x) + " " + str(vert.y) + " " + str(vert.z) + "\n"
str_v.append(temp)
if include_param_vertex:
for vert in vertices:
temp = "vp " + str(vert.uv[0]) + " " + str(vert.uv[1]) + "\n"
str_vp.append(temp)
if include_vertex_normal:
for vert in vertices:
sn = operations.normal(srf, vert.uv)
temp = "vn " + str(sn[1][0]) + " " + str(sn[1][1]) + " " + str(sn[1][2]) + "\n"
str_vn.append(temp)
for t in triangles:
vl = t.data
temp = "f " + \
str(vl[0] + 1 + vertex_offset) + " " + \
str(vl[1] + 1 + vertex_offset) + " " + \
str(vl[2] + 1 + vertex_offset) + "\n"
str_f.append(temp)
vertex_offset = len(str_v)
for lv in str_v:
line += lv
for lvn in str_vn:
line += lvn
for lvp in str_vp:
line += lvp
for lf in str_f:
line += lf
return line | Exports surface(s) as a .obj file (string).
Keyword Arguments:
* ``vertex_spacing``: size of the triangle edge in terms of surface points sampled. *Default: 2*
* ``vertex_normals``: if True, then computes vertex normals. *Default: False*
* ``parametric_vertices``: if True, then adds parameter space vertices. *Default: False*
* ``update_delta``: use multi-surface evaluation delta for all surfaces. *Default: True*
:param surface: surface or surfaces to be saved
:type surface: abstract.Surface or multi.SurfaceContainer
:return: contents of the .obj file generated
:rtype: str |
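A hedged usage sketch assuming a geomdl-style surface object named surf has already been built; the keyword names are the ones documented above:

obj_text = export_obj_str(surf,
                          vertex_spacing=2,
                          vertex_normals=True,
                          parametric_vertices=False,
                          update_delta=True)
with open("surface.obj", "w") as fh:
    fh.write(obj_text)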
5,590 | def token(self):
if self.next_:
t = self.next_
self.next_ = None
return t
while True:
t = self.lexer.token()
if not t:
return t
if t.type == and (
self.pretok or
(self.last and self.last.type not in self.significant_ws)):
continue
self.pretok = False
if t.type == and self.last and self.last.type not in [, ] and self.last.type != \
and not (hasattr(t, ) and (t.lexer.lexstate == or t.lexer.lexstate == )):
self.next_ = t
tok = lex.LexToken()
tok.type =
tok.value =
tok.lineno = t.lineno
tok.lexpos = t.lexpos
self.last = tok
self.lexer.in_property_decl = False
return tok
self.last = t
break
return t | Token function. Contains 2 hacks:
1. Injects ';' into blocks where the last property
leaves out the ;
2. Strips out whitespace from nonsignificant locations
to ease parsing. |
5,591 | def listener(self, acceptor, wrapper):
self._track_tendril(tend)
tend._start() | Listens for new connections to the manager's endpoint. Once a
new connection is received, a TCPTendril object is generated
for it and it is passed to the acceptor, which must initialize
the state of the connection. If no acceptor is given, no new
connections can be initialized.
:param acceptor: If given, specifies a callable that will be
called with each newly received TCPTendril;
that callable is responsible for initial
acceptance of the connection and for setting
up the initial state of the connection. If
not given, no new connections will be
accepted by the TCPTendrilManager.
:param wrapper: A callable taking, as its first argument, a
socket.socket object. The callable must
return a valid proxy for the socket.socket
object, which will subsequently be used to
communicate on the connection. |
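The acceptor/wrapper contract can be illustrated with a minimal sketch; these callables are hypothetical examples of the expected signatures, and the TLS wrapping and per-connection state are assumptions, not part of the original library:

import ssl

ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_cert_chain("server.pem")                  # placeholder certificate path

def my_wrapper(sock):
    # return a proxy for the raw socket -- here, a TLS-wrapped version of it
    return ctx.wrap_socket(sock, server_side=True)

def my_acceptor(tendril):
    # initialise the per-connection state on the newly created TCPTendril
    tendril.application = {"authenticated": False}  # hypothetical state attribute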
5,592 | def bug_activity(self, bug_id):
params = {
self.PBUG_ID: bug_id
}
response = self.call(self.CGI_BUG_ACTIVITY, params)
return response | Get the activity of a bug in HTML format.
:param bug_id: bug identifier |
5,593 | def completions(self):
debug.speed()
comps = self._evaluator.complete()
debug.speed()
return sorted(comps, key=lambda x: (x.name.lower())) | Return :class:`classes.Completion` objects. Those objects contain
information about the completions, more than just names.
:return: Completion objects, sorted by name.
:rtype: list of :class:`classes.Completion` |
5,594 | def to_dict(self):
if self.subcall is not None:
if isinstance(self.subcall, dict):
subcalls = self.subcall
else:
subcalls = {}
for s in self.subcall:
subcalls.update(s.to_dict())
return {(self.filename, self.line_number, self.name): \
(self.ncalls, self.nonrecursive_calls, self.own_time_s, self.cummulative_time_s, subcalls)}
else:
return {(self.filename, self.line_number, self.name): \
(self.ncalls, self.nonrecursive_calls, self.own_time_s, self.cummulative_time_s)} | Convert back to the pstats dictionary representation (used for saving back as pstats binary file) |
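For reference, the mapping produced above has the pstats-style shape sketched below (the file name and numbers are invented):

{
    ("module.py", 42, "my_func"): (
        10,      # ncalls
        8,       # non-recursive calls
        0.004,   # own time, seconds
        0.012,   # cumulative time, seconds
        {...},   # nested sub-call dict, present only when subcall data exists
    )
}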
5,595 | def currentVersion(self):
if self._currentVersion is None:
self.__init(self._url)
return self._currentVersion | returns the current version of the site |
5,596 | def _qteMouseClicked(self, widgetObj):
app = qteGetAppletFromWidget(widgetObj)
if app is None:
return
else:
self._qteActiveApplet = app
if not hasattr(widgetObj, ):
self._qteActiveApplet.qteMakeWidgetActive(widgetObj)
else:
if app._qteAdmin.isQtmacsApplet:
self._qteActiveApplet.qteMakeWidgetActive(None)
else:
self._qteActiveApplet.qteMakeWidgetActive(widgetObj)
self._qteFocusManager() | Update the Qtmacs internal focus state as the result of a mouse click.
|Args|
* ``widgetObj`` (**QWidget**): the widget that was clicked.
|Returns|
* **None**
|Raises|
* **None** |
5,597 | def colors(self, color_code):
if color_code is None:
color_code = WINDOWS_CODES[]
current_fg, current_bg = self.colors
if color_code == WINDOWS_CODES[]:
final_color_code = self.default_fg | current_bg
elif color_code == WINDOWS_CODES[]:
final_color_code = current_fg | self.default_bg
elif color_code == WINDOWS_CODES[]:
final_color_code = self.default_fg | self.default_bg
elif color_code == WINDOWS_CODES[]:
final_color_code = current_fg
else:
new_is_bg = color_code in self.ALL_BG_CODES
final_color_code = color_code | (current_fg if new_is_bg else current_bg)
self._kernel32.SetConsoleTextAttribute(self._stream_handle, final_color_code) | Change the foreground and background colors for subsequently printed characters.
None resets colors to their original values (when class was instantiated).
Since setting a color requires including both foreground and background codes (merged), setting just the
foreground color resets the background color to black, and vice versa.
This function first gets the current background and foreground colors, merges in the requested color code, and
sets the result.
However if we need to remove just the foreground color but leave the background color the same (or vice versa)
such as when {/red} is used, we must merge the default foreground color with the current background color. This
is the reason for those negative values.
:param int color_code: Color code from WINDOWS_CODES. |
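A worked example of the merge described above, using the standard Windows console attribute bits (FOREGROUND_RED = 0x0004, BACKGROUND_GREEN = 0x0020); only the bitwise combination is shown, not the class machinery:

FOREGROUND_RED = 0x0004
BACKGROUND_GREEN = 0x0020

current_fg, current_bg = FOREGROUND_RED, 0x0000   # red text on a black background
merged = current_fg | BACKGROUND_GREEN            # request a green background
print(hex(merged))                                # 0x24: red foreground kept, green background set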
5,598 | def softmax(x: np.ndarray,
b: float = 1.0) -> np.ndarray:
constant = np.mean(x)
products = x * b - constant
if products.max() > sys.float_info.max_exp:
log.warning("OVERFLOW in softmax(): x = {}, b = {}, constant = {}, "
"x*b - constant = {}".format(x, b, constant, products))
n = len(x)
index_of_max = np.argmax(products)
answer = np.zeros(n)
answer[index_of_max] = 1.0
else:
exponented = np.exp(products)
answer = exponented / np.sum(exponented)
return answer |
Standard softmax function:
.. math::
P_i = \frac{e^{\beta \cdot x_i}}{\sum_{i} e^{\beta \cdot x_i}}
Args:
x: vector (``numpy.array``) of values
b: exploration parameter :math:`\beta`, or inverse temperature
[Daw2009], or :math:`1/t`; see below
Returns:
vector of probabilities corresponding to the input values
where:
- :math:`t` is temperature (towards infinity: all actions equally likely;
towards zero: probability of action with highest value tends to 1)
- Temperature is not used directly as optimizers may take it to zero,
giving an infinity; use inverse temperature instead.
- [Daw2009] Daw ND, "Trial-by-trial data analysis using computational
methods", 2009/2011; in "Decision Making, Affect, and Learning: Attention
and Performance XXIII"; Delgado MR, Phelps EA, Robbins TW (eds),
Oxford University Press. |
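A quick numeric check of the function defined above (NumPy required):

import numpy as np

probs = softmax(np.array([1.0, 2.0, 3.0]), b=1.0)
# probs ~= [0.090, 0.245, 0.665]; subtracting the mean inside the body only
# shifts the exponents, so the result matches the textbook softmax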
5,599 | def get(self, key, value):
if key == :
response = self._swimlane.request(, .format(value))
if response.status_code == 204:
raise ValueError(.format(value))
return App(
self._swimlane,
response.json()
)
else:
for app in self.list():
if value and value == app.name:
return app
raise ValueError(.format(value)) | Get single app by one of id or name
Supports resource cache
Keyword Args:
id (str): Full app id
name (str): App name
Returns:
App: Corresponding App resource instance
Raises:
TypeError: No or multiple keyword arguments provided
ValueError: No matching app found on server |
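A usage sketch against the snippet's raw (key, value) signature, where apps stands for an instance of the surrounding adapter class; the key literal is elided in the code above, so "id" follows the documented keyword, and both lookup values are placeholders:

app = apps.get("id", "58f0e2a7b2c3d4e5f6a7b8c9")   # placeholder app id
app = apps.get("name", "Incident Response")        # placeholder app name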