Unnamed: 0 (int64, 0 to 389k) | code (stringlengths 26 to 79.6k) | docstring (stringlengths 1 to 46.9k)
---|---|---|
1,900 | def validate_request():
flask_request = request
request_data = flask_request.get_data()
if not request_data:
request_data = b''
request_data = request_data.decode()
try:
json.loads(request_data)
except ValueError as json_error:
LOG.debug("Request body was not json. Exception: %s", str(json_error))
return LambdaErrorResponses.invalid_request_content(
"Could not parse request body into json: No JSON object could be decoded")
if flask_request.args:
LOG.debug("Query parameters are in the request but not supported")
return LambdaErrorResponses.invalid_request_content("Query Parameters are not supported")
request_headers = CaseInsensitiveDict(flask_request.headers)
log_type = request_headers.get('X-Amz-Log-Type', 'None')
if log_type != 'None':
LOG.debug("log-type: %s is not supported. None is only supported.", log_type)
return LambdaErrorResponses.not_implemented_locally(
"log-type: {} is not supported. None is only supported.".format(log_type))
invocation_type = request_headers.get('X-Amz-Invocation-Type', 'RequestResponse')
if invocation_type != 'RequestResponse':
LOG.warning("invocation-type: %s is not supported. RequestResponse is only supported.", invocation_type)
return LambdaErrorResponses.not_implemented_locally(
"invocation-type: {} is not supported. RequestResponse is only supported.".format(invocation_type)) | Validates the incoming request
The following are invalid
1. The Request data is not json serializable
2. Query Parameters are sent to the endpoint
3. The Request Content-Type is not application/json
4. 'X-Amz-Log-Type' header is not 'None'
5. 'X-Amz-Invocation-Type' header is not 'RequestResponse'
Returns
-------
flask.Response
If the request is not valid a flask Response is returned
None:
If the request passes all validation |
1,901 | def _get_rate(self, mag):
mag_lo = mag - self.bin_width / 2.0
mag_hi = mag + self.bin_width / 2.0
if mag >= self.min_mag and mag < self.char_mag - DELTA_CHAR / 2:
return (10 ** (self.a_val - self.b_val * mag_lo)
- 10 ** (self.a_val - self.b_val * mag_hi))
else:
return (self.char_rate / DELTA_CHAR) * self.bin_width | Calculate and return the annual occurrence rate for a specific bin.
:param mag:
Magnitude value corresponding to the center of the bin of interest.
:returns:
Float number, the annual occurrence rate for the given ``mag`` value. |
1,902 | def delete_refund_transaction_by_id(cls, refund_transaction_id, **kwargs):
kwargs[] = True
if kwargs.get('async'):
return cls._delete_refund_transaction_by_id_with_http_info(refund_transaction_id, **kwargs)
else:
(data) = cls._delete_refund_transaction_by_id_with_http_info(refund_transaction_id, **kwargs)
return data | Delete RefundTransaction
Delete an instance of RefundTransaction by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_refund_transaction_by_id(refund_transaction_id, async=True)
>>> result = thread.get()
:param async bool
:param str refund_transaction_id: ID of refundTransaction to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread. |
1,903 | def cores_orthogonalization_step(coresX, dim, left_to_right=True):
cc = coresX[dim]
r1, n, r2 = cc.shape
if left_to_right:
assert(0 <= dim < len(coresX) - 1)
cc, rr = np.linalg.qr(reshape(cc, (-1, r2)))
r2 = cc.shape[1]
coresX[dim] = reshape(cc, (r1, n, r2))
coresX[dim+1] = np.tensordot(rr, coresX[dim+1], 1)
else:
assert(0 < dim < len(coresX))
cc, rr = np.linalg.qr(reshape(cc, (r1, -1)).T)
r1 = cc.shape[1]
coresX[dim] = reshape(cc.T, (r1, n, r2))
coresX[dim-1] = np.tensordot(coresX[dim-1], rr.T, 1)
return coresX | TT-Tensor X orthogonalization step.
The function can change the shape of some cores. |
1,904 | def add_object_to_scope(self, obj):
if isinstance(obj, Computer):
self.add_object_to_path(obj, "scope/computers")
elif isinstance(obj, ComputerGroup):
self.add_object_to_path(obj, "scope/computer_groups")
elif isinstance(obj, Building):
self.add_object_to_path(obj, "scope/buildings")
elif isinstance(obj, Department):
self.add_object_to_path(obj, "scope/departments")
else:
raise TypeError | Add an object to the appropriate scope block.
Args:
obj: JSSObject to add to scope. Accepted subclasses are:
Computer
ComputerGroup
Building
Department
Raises:
TypeError if invalid obj type is provided. |
1,905 | def remove_breakpoint(self, py_db, filename, breakpoint_type, breakpoint_id):
# handles line breakpoints of type: python-line, django-line, jinja2-line
file_to_id_to_breakpoint = None
if breakpoint_type == 'python-line':
breakpoints = py_db.breakpoints
file_to_id_to_breakpoint = py_db.file_to_id_to_line_breakpoint
elif py_db.plugin is not None:
result = py_db.plugin.get_breakpoints(py_db, breakpoint_type)
if result is not None:
file_to_id_to_breakpoint = py_db.file_to_id_to_plugin_breakpoint
breakpoints = result
if file_to_id_to_breakpoint is None:
pydev_log.critical(, breakpoint_type)
else:
try:
id_to_pybreakpoint = file_to_id_to_breakpoint.get(filename, {})
if DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS > 0:
existing = id_to_pybreakpoint[breakpoint_id]
pydev_log.info( % (
filename, existing.line, existing.func_name.encode(), breakpoint_id))
del id_to_pybreakpoint[breakpoint_id]
py_db.consolidate_breakpoints(filename, id_to_pybreakpoint, breakpoints)
if py_db.plugin is not None:
py_db.has_plugin_line_breaks = py_db.plugin.has_line_breaks()
except KeyError:
pydev_log.info("Error removing breakpoint: Breakpoint id not found: %s id: %s. Available ids: %s\n",
filename, breakpoint_id, dict_keys(id_to_pybreakpoint))
py_db.on_breakpoints_changed(removed=True) | :param str filename:
Note: must be already translated for the server.
:param str breakpoint_type:
One of: 'python-line', 'django-line', 'jinja2-line'.
:param int breakpoint_id: |
1,906 | def move_distance(self, distance_x_m, distance_y_m, distance_z_m,
velocity=VELOCITY):
distance = math.sqrt(distance_x_m * distance_x_m +
distance_y_m * distance_y_m +
distance_z_m * distance_z_m)
flight_time = distance / velocity
velocity_x = velocity * distance_x_m / distance
velocity_y = velocity * distance_y_m / distance
velocity_z = velocity * distance_z_m / distance
self.start_linear_motion(velocity_x, velocity_y, velocity_z)
time.sleep(flight_time)
self.stop() | Move in a straight line.
positive X is forward
positive Y is left
positive Z is up
:param distance_x_m: The distance to travel along the X-axis (meters)
:param distance_y_m: The distance to travel along the Y-axis (meters)
:param distance_z_m: The distance to travel along the Z-axis (meters)
:param velocity: the velocity of the motion (meters/second)
:return: |
1,907 | def load_env(print_vars=False):
env_file = os.environ.get(, )
try:
variables = open(env_file).read().splitlines()
for v in variables:
if '=' in v:
key, value = v.split('=', 1)
if key.startswith('#'):
continue
if key not in os.environ:
if value.startswith('"') and value.endswith('"') or \
value.startswith("'"):
os.environ[key] = ast.literal_eval(value)
else:
os.environ[key] = value
if print_vars:
print(key, os.environ[key])
except IOError:
pass | Load environment variables from a .env file, if present.
If an .env file is found in the working directory, and the listed
environment variables are not already set, they will be set according to
the values listed in the file. |
1,908 | def put(self, resource, **params):
return self._execute(self.session.put, 'PUT', resource, **params) | Generic TeleSign REST API PUT handler.
:param resource: The partial resource URI to perform the request against, as a string.
:param params: Body params to perform the PUT request with, as a dictionary.
:return: The RestClient Response object. |
1,909 | def update_ontology(ont_url, rdf_path):
yaml_root = load_yaml_from_url(ont_url)
G = rdf_graph_from_yaml(yaml_root)
save_hierarchy(G, rdf_path) | Load an ontology formatted like Eidos' from github. |
1,910 | def from_localhost(self) -> bool:
sock_family = self.socket.family
if sock_family == _socket.AF_UNIX:
return True
elif sock_family not in (_socket.AF_INET, _socket.AF_INET6):
return False
sock_address, *_ = self.peername
ip = ipaddress.ip_address(sock_address)
if ip.version == 6 and ip.ipv4_mapped is not None:
ip = ipaddress.ip_address(ip.ipv4_mapped)
return ip.is_loopback | True if :attr:`.peername` is a connection from a ``localhost``
address. |
1,911 | def create_explicit(bounds):
safe_bounds = sorted(float(x) for x in bounds)
if len(safe_bounds) != len(set(safe_bounds)):
raise ValueError(u)
return sc_messages.Distribution(
bucketCounts=[0] * (len(safe_bounds) + 1),
explicitBuckets=sc_messages.ExplicitBuckets(bounds=safe_bounds)) | Creates a new instance of distribution with explicit buckets.
bounds is an iterable of ordered floats that define the explicit buckets
Args:
bounds (iterable[float]): initializes the bounds
Return:
:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`
Raises:
ValueError: if the args are invalid for creating an instance |
1,912 | def fit1d(samples, e, remove_zeros = False, **kw):
samples = samples[~np.isnan(samples)]
length = len(e)-1
hist,_ = np.histogramdd(samples, (e,))
hist = hist/sum(hist)
basis, knots = spline_base1d(length, marginal = hist, **kw)
non_zero = hist>0
model = linear_model.BayesianRidge()
if remove_zeros:
model.fit(basis[non_zero, :], hist[:,np.newaxis][non_zero,:])
else:
hist[~non_zero] = np.finfo(float).eps
model.fit(basis, hist[:,np.newaxis])
return model.predict(basis), hist, knots | Fits a 1D distribution with splines.
Input:
samples: Array
Array of samples from a probability distribution
e: Array
Edges that define the events in the probability
distribution. For example, e[0] < x <= e[1] is
the range of values that are associated with the
first event.
**kw: Arguments that are passed on to spline_bse1d.
Returns:
distribution: Array
An array that gives an estimate of probability for
events defined by e.
knots: Array
Sequence of knots that were used for the spline basis |
1,913 | def association_pivot(self, association_resource):
resource = self.copy()
resource._request_uri = '{}/{}'.format(
association_resource.request_uri, resource._request_uri
)
return resource | Pivot point on association for this resource.
This method will return all *resources* (group, indicators, task, victims, etc) for this
resource that are associated with the provided resource.
**Example Endpoints URI's**
+---------+--------------------------------------------------------------------------------+
| METHOD | API Endpoint URI's |
+=========+================================================================================+
| GET | /v2/groups/{pivot resourceType}/{pivot uniqueId}/{resourceType} |
+---------+--------------------------------------------------------------------------------+
| GET | /v2/groups/{pivot resourceType}/{pivot uniqueId}/{resourceType}/{uniqueId} |
+---------+--------------------------------------------------------------------------------+
| POST | /v2/groups/{pivot resourceType}/{pivot uniqueId}/{resourceType}/{uniqueId} |
+---------+--------------------------------------------------------------------------------+
| GET | /v2/indicators/{pivot resourceType}/{pivot uniqueId}/{resourceType} |
+---------+--------------------------------------------------------------------------------+
| GET | /v2/indicators/{pivot resourceType}/{pivot uniqueId}/{resourceType}/{uniqueId} |
+---------+--------------------------------------------------------------------------------+
| POST | /v2/indicator/{pivot resourceType}/{pivot uniqueId}/{resourceType}/{uniqueId} |
+---------+--------------------------------------------------------------------------------+
Args:
resource_api_branch (string): The resource pivot api branch including resource id. |
1,914 | def needs_manager_helps():
message = m.Message()
message.add(m.Brand())
message.add(heading())
message.add(content())
return message | Help message for Batch Dialog.
.. versionadded:: 3.2.1
:returns: A message object containing helpful information.
:rtype: messaging.message.Message |
1,915 | def key_absent(name, region=None, key=None, keyid=None, profile=None):
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}
}
exists = __salt__[](name, region, key, keyid, profile)
if exists:
if __opts__['test']:
ret[] = .format(name)
ret[] = None
return ret
deleted = __salt__[](name, region,
key, keyid,
profile)
log.debug(, deleted)
if deleted:
ret[] = True
ret[] = .format(name)
ret[][] = name
else:
ret[] = False
ret[] = .format(name)
else:
ret[] = True
ret[] = .format(name)
return ret | Deletes a key pair |
1,916 | def titlecase(text, callback=None, small_first_last=True):
lines = re.split('[\r\n]+', text)
processed = []
for line in lines:
all_caps = line.upper() == line
words = re.split('[\t ]', line)
tc_line = []
for word in words:
if callback:
new_word = callback(word, all_caps=all_caps)
if new_word:
tc_line.append(_mark_immutable(new_word))
continue
if all_caps:
if UC_INITIALS.match(word):
tc_line.append(word)
continue
if APOS_SECOND.match(word):
if len(word[0]) == 1 and word[0] not in 'aeiouAEIOU':
word = word[0].lower() + word[1] + word[2].upper() + word[3:]
else:
word = word[0].upper() + word[1] + word[2].upper() + word[3:]
tc_line.append(word)
continue
match = MAC_MC.match(word)
if match:
tc_line.append("%s%s" % (match.group(1).capitalize(),
titlecase(match.group(2),callback,small_first_last)))
continue
if INLINE_PERIOD.search(word) or (not all_caps and UC_ELSEWHERE.match(word)):
tc_line.append(word)
continue
if SMALL_WORDS.match(word):
tc_line.append(word.lower())
continue
if "/" in word and "//" not in word:
slashed = map(
lambda t: titlecase(t,callback,False),
word.split('/')
)
tc_line.append("/".join(slashed))
continue
if '-' in word:
hyphenated = map(
lambda t: titlecase(t,callback,small_first_last),
word.split()
)
tc_line.append("-".join(hyphenated))
continue
if all_caps:
word = word.lower()
tc_line.append(CAPFIRST.sub(lambda m: m.group(0).upper(), word))
if small_first_last and tc_line:
if not isinstance(tc_line[0], Immutable):
tc_line[0] = SMALL_FIRST.sub(lambda m: '%s%s' % (
m.group(1),
m.group(2).capitalize()
), tc_line[0])
if not isinstance(tc_line[-1], Immutable):
tc_line[-1] = SMALL_LAST.sub(
lambda m: m.group(0).capitalize(), tc_line[-1]
)
result = " ".join(tc_line)
result = SUBPHRASE.sub(lambda m: '%s%s' % (
m.group(1),
m.group(2).capitalize()
), result)
processed.append(result)
return "\n".join(processed) | Titlecases input text
This filter changes all words to Title Caps, and attempts to be clever
about *un*capitalizing SMALL words like a/an/the in the input.
The list of "SMALL words" which are not capped comes from
the New York Times Manual of Style, plus 'vs' and 'v'. |
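A minimal usage sketch for the titlecase entry above, based only on the behavior described in its docstring (small words such as 'a', 'of', and 'the' stay lowercase except at the start of a line):
>>> titlecase("a tale of two cities")
'A Tale of Two Cities'
>>> titlecase("the quick brown fox jumps over the lazy dog")
'The Quick Brown Fox Jumps Over the Lazy Dog'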
1,917 | def p_namelist(self,t):
"namelist : namelist NAME \n | NAME"
if len(t)==2: t[0] = [t[1]]
elif len(t)==4: t[0] = t[1] + [t[3]]
else: raise NotImplementedError('unexpected number of parse symbols', len(t)) | namelist : namelist ',' NAME \n | NAME |
1,918 | def upgrade(refresh=True, **kwargs):
ret = {'changes': {},
'result': True,
'comment': '',
}
old = list_pkgs()
if salt.utils.data.is_true(refresh):
refresh_db()
result = _call_brew('upgrade', failhard=False)
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
if result['retcode'] != 0:
raise CommandExecutionError(
'Problem encountered upgrading packages',
info={'changes': ret, 'result': result}
)
return ret | Upgrade outdated, unpinned brews.
refresh
Fetch the newest version of Homebrew and all formulae from GitHub before installing.
Returns a dictionary containing the changes:
.. code-block:: python
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Example:
.. code-block:: bash
salt '*' pkg.upgrade |
1,919 | def create_tomodir(self, directory):
pwd = os.getcwd()
if not os.path.isdir(directory):
os.makedirs(directory)
os.chdir(directory)
directories = (
,
,
,
,
,
,
,
)
for directory in directories:
if not os.path.isdir(directory):
os.makedirs(directory)
os.chdir(pwd) | Create a tomodir subdirectory structure in the given directory |
1,920 | def _GetMountpoints(only_physical=True):
partitions = psutil.disk_partitions(all=not only_physical)
return set(partition.mountpoint for partition in partitions) | Fetches a list of mountpoints.
Args:
only_physical: Determines whether only mountpoints for physical devices
(e.g. hard disks) should be listed. If false, mountpoints for things such
as memory partitions or `/dev/shm` will be returned as well.
Returns:
A set of mountpoints. |
1,921 | def getaddress(self):
self.commentlist = []
self.gotonext()
oldpos = self.pos
oldcl = self.commentlist
plist = self.getphraselist()
self.gotonext()
returnlist = []
if self.pos >= len(self.field):
if plist:
returnlist = [(SPACE.join(self.commentlist), plist[0])]
elif self.field[self.pos] in :
self.pos += 1
return returnlist | Parse the next address. |
1,922 | def count_lines_in_file(self, fname=''):
i = 0
if fname == '':
fname = self.fullname
try:
with codecs.open(fname, "r", encoding='utf8', errors='ignore') as f:
for i, _ in enumerate(f):
pass
return i + 1
except Exception as ex:
print(, fname, , str(ex))
return 0 | Count the number of lines in a file (defaults to ``self.fullname``); returns 0 on error. |
1,923 | def write_default_config(self, filename):
try:
with open(filename, 'w') as file:
file.write(DEFAULT_CONFIG)
return True
except (IOError, OSError) as e:
print('Failed to write default config to %r: %s' % (filename, e.strerror or e), file=sys.stderr)
return False | Write the default config file. |
1,924 | def report(self, score_map, type="valid", epoch=-1, new_best=False):
type_str = type
if len(type_str) < 5:
type_str += " " * (5 - len(type_str))
info = " ".join("%s=%.2f" % el for el in score_map.items())
current_epoch = epoch if epoch > 0 else self.current_epoch()
epoch_str = "epoch={}".format(current_epoch + 1)
if epoch < 0:
epoch_str = "dryrun"
sys.stdout.write("\r")
sys.stdout.flush()
marker = " *" if new_best else ""
message = "{} ({}) {}{}".format(type_str, epoch_str, info, marker)
self.network.train_logger.record(message)
logging.info(message) | Report the scores and record them in the log. |
1,925 | def resize(self, width, height, **kwargs):
opts = Image._normalize_options(kwargs)
size = self._get_size(width, height)
if opts["mode"] == "adapt":
self._adapt(size, opts)
elif opts["mode"] == "clip":
self._clip(size, opts)
elif opts["mode"] == "fill":
self._fill(size, opts)
elif opts["mode"] == "scale":
self._scale(size, opts)
else:
self._crop(size, opts)
return self | Resizes the image to the supplied width/height. Returns the
instance. Supports the following optional keyword arguments:
mode - The resizing mode to use, see Image.MODES
filter - The filter to use: see Image.FILTERS
background - The hexadecimal background fill color, RGB or ARGB
position - The position used to crop: see Image.POSITIONS for
pre-defined positions or a custom position ratio
retain - The minimum percentage of the original image to retain
when cropping |
1,926 | def MobileDeviceApplication(self, data=None, subset=None):
return self.factory.get_object(jssobjects.MobileDeviceApplication,
data, subset) | {dynamic_docstring} |
1,927 | def get_string_resources(self, package_name, locale='\x00\x00'):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["string"]:
if any(map(i[1].__contains__, '<&>')):
value = '<![CDATA[%s]]>' % i[1]
else:
value = i[1]
buff += '<string name="{}">{}</string>\n'.format(i[0], value)
except KeyError:
pass
buff += '</resources>\n'
return buff.encode() | Get the XML (as string) of all resources of type 'string'.
Read more about string resources:
https://developer.android.com/guide/topics/resources/string-resource.html
:param package_name: the package name to get the resources for
:param locale: the locale to get the resources for (default: '\x00\x00') |
1,928 | def _get_data_by_field(self, field_number):
if not self.is_data_loaded:
self._import_data()
if not 0 <= field_number < self._num_of_fields:
raise ValueError("Field number should be between 0-%d" % self._num_of_fields)
return self._data[field_number] | Return a data field by field number.
This is a useful method to get the values for fields that Ladybug
currently doesn't import by default. You can find list of fields by typing
EPWFields.fields
Args:
field_number: a value between 0 to 34 for different available epw fields.
Returns:
An annual Ladybug list |
1,929 | def check_messages(*messages: str) -> Callable:
def store_messages(func):
func.checks_msgs = messages
return func
return store_messages | decorator to store messages that are handled by a checker method |
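A brief, hypothetical sketch of how the decorator above would be applied to a checker method; the message IDs and class name are illustrative only:
>>> class MyChecker:
...     @check_messages("unused-variable", "undefined-variable")
...     def visit_functiondef(self, node):
...         pass
>>> MyChecker.visit_functiondef.checks_msgs
('unused-variable', 'undefined-variable')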
1,930 | def create(self):
steps = [
(self.create_role, (), {}),
(self.create_vpc, (), {}),
(self.create_cluster, (), {}),
(self.create_node_group, (), {}),
(self.create_spot_nodes, (), {}),
(self.create_utilities, (), {}),
]
for step in tqdm.tqdm(steps, ncols=70):
method, args, kwargs = step
method(*args, **kwargs) | Deploy a cluster on Amazon's EKS Service configured
for Jupyterhub Deployments. |
1,931 | def _submit_metrics(self, metrics, metric_name_and_type_by_property):
for metric in metrics:
if (
metric.name not in metric_name_and_type_by_property
and metric.name.lower() not in metric_name_and_type_by_property
):
continue
if metric_name_and_type_by_property.get(metric.name):
metric_name, metric_type = metric_name_and_type_by_property[metric.name]
elif metric_name_and_type_by_property.get(metric.name.lower()):
metric_name, metric_type = metric_name_and_type_by_property[metric.name.lower()]
else:
continue
try:
func = getattr(self, metric_type.lower())
except AttributeError:
raise Exception(u"Invalid metric type: {0}".format(metric_type))
func(metric_name, metric.value, metric.tags) | Resolve metric names and types and submit it. |
1,932 | def netconf_config_change_changed_by_server_or_user_server_server(self, **kwargs):
config = ET.Element("config")
netconf_config_change = ET.SubElement(config, "netconf-config-change", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-notifications")
changed_by = ET.SubElement(netconf_config_change, "changed-by")
server_or_user = ET.SubElement(changed_by, "server-or-user")
server = ET.SubElement(server_or_user, "server")
server = ET.SubElement(server, "server")
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
1,933 | def hr_dp996(self, value=None):
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
.format(value))
self._hr_dp996 = value | Corresponds to IDD Field `hr_dp996`
humidity ratio, calculated at standard atmospheric pressure
at elevation of station, corresponding to
Dew-point temperature corresponding to 99.6% annual cumulative
frequency of occurrence (cold conditions)
Args:
value (float): value for IDD Field `hr_dp996`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value |
1,934 | def items(self):
if ver == (2, 7):
return self.viewitems()
elif ver == (2, 6):
return self.iteritems()
elif ver >= (3, 0):
return self.items() | On Python 2.7+:
D.items() -> a set-like object providing a view on D's items
On Python 2.6:
D.items() -> an iterator over D's items |
1,935 | def _read_image_slice(self, arg):
if 'ndims' not in self._info:
raise ValueError("Attempt to slice empty extension")
if isinstance(arg, slice):
return self._read_image_slice((arg,))
if not isinstance(arg, tuple):
raise ValueError("arguments must be slices, one for each "
"dimension, e.g. [2:5] or [2:5,8:25] etc.")
nd = len(arg)
if nd != self._info['ndims']:
raise ValueError("Got slice dimensions %d, "
"expected %d" % (nd, self._info['ndims']))
targ = arg
arg = []
for a in targ:
if isinstance(a, slice):
arg.append(a)
elif isinstance(a, int):
arg.append(slice(a, a+1, 1))
else:
raise ValueError("arguments must be slices, e.g. 2:12")
dims = self._info['dims']
arrdims = []
first = []
last = []
steps = []
dim = 0
for slc in arg:
start = slc.start
stop = slc.stop
step = slc.step
if start is None:
start = 0
if stop is None:
stop = dims[dim]
if step is None:
step = 1
if step < 1:
raise ValueError("slice steps must be >= 1")
if start < 0:
start = dims[dim] + start
if start < 0:
raise IndexError("Index out of bounds")
if stop < 0:
stop = dims[dim] + start + 1
start = start + 1
if stop < start:
raise ValueError("python slices but include at least one "
"element, got %s" % slc)
if stop > dims[dim]:
stop = dims[dim]
first.append(start)
last.append(stop)
steps.append(step)
arrdims.append(stop-start+1)
dim += 1
first.reverse()
last.reverse()
steps.reverse()
first = numpy.array(first, dtype='i8')
last = numpy.array(last, dtype='i8')
steps = numpy.array(steps, dtype='i8')
npy_dtype = self._get_image_numpy_dtype()
array = numpy.zeros(arrdims, dtype=npy_dtype)
self._FITS.read_image_slice(self._ext+1, first, last, steps, array)
return array | workhorse to read a slice |
1,936 | def _push_condition(predicate):
global _depth
_check_under_condition()
_depth += 1
if predicate is not otherwise and len(predicate) > 1:
raise PyrtlError()
_conditions_list_stack[-1].append(predicate)
_conditions_list_stack.append([]) | As we enter new conditions, this pushes them on the predicate stack. |
1,937 | def validate(self, ticket=None):
qs = self.filter(validation__isnull=True).check_groupable()
if qs.count() == 0:
return []
qs.order_by(, )._assign_numbers()
return qs._validate(ticket) | Validates all receipts matching this queryset.
Note that, due to how AFIP implements its numbering, this method is not
thread-safe, or even multiprocess-safe.
Because of this, it is possible that not all instances matching this
queryset are validated properly. Obviously, only successfully validated
receipts will be updated.
Returns a list of errors as returned from AFIP's webservices. An
exception is not raised because partial failures are possible.
Receipts that successfully validate will have a
:class:`~.ReceiptValidation` object attached to them with a validation
date and CAE information.
Already-validated receipts are ignored.
Attempting to validate an empty queryset will simply return an empty
list. |
1,938 | def ssh_check_mic(self, mic_token, session_id, username=None):
self._session_id = session_id
self._username = username
if username is not None:
mic_field = self._ssh_build_mic(
self._session_id,
self._username,
self._service,
self._auth_method,
)
self._gss_srv_ctxt.verify(mic_field, mic_token)
else:
self._gss_ctxt.verify(self._session_id, mic_token) | Verify the MIC token for a SSH2 message.
:param str mic_token: The MIC token received from the client
:param str session_id: The SSH session ID
:param str username: The name of the user who attempts to login
:return: None if the MIC check was successful
:raises: ``sspi.error`` -- if the MIC check failed |
1,939 | def check_roles(self, account, aws_policies, aws_roles):
self.log.debug(.format(account.account_name))
max_session_duration = self.dbconfig.get(, self.ns, 8) * 60 * 60
sess = get_aws_session(account)
iam = sess.client('iam')
account_roles = copy.deepcopy(self.cfg_roles)
if account.account_name in self.git_policies:
for role in self.git_policies[account.account_name]:
if role in account_roles:
account_roles[role][] += list(self.git_policies[account.account_name][role].keys())
for role_name, data in list(account_roles.items()):
if role_name not in aws_roles:
iam.create_role(
Path=,
RoleName=role_name,
AssumeRolePolicyDocument=json.dumps(data[], indent=4),
MaxSessionDuration=max_session_duration
)
self.log.info(.format(account.account_name, role_name))
else:
try:
if aws_roles[role_name][] != max_session_duration:
iam.update_role(
RoleName=aws_roles[role_name][],
MaxSessionDuration=max_session_duration
)
self.log.info(.format(
role_name,
account.account_name,
max_session_duration
))
except ClientError:
self.log.exception(.format(
role_name,
account.account_name
))
aws_role_policies = [x['PolicyName'] for x in iam.list_attached_role_policies(
RoleName=role_name)['AttachedPolicies']
]
aws_role_inline_policies = iam.list_role_policies(RoleName=role_name)['PolicyNames']
cfg_role_policies = data[]
missing_policies = list(set(cfg_role_policies) - set(aws_role_policies))
extra_policies = list(set(aws_role_policies) - set(cfg_role_policies))
if aws_role_inline_policies:
self.log.info(.format(
role_name,
account.account_name,
.join(aws_role_inline_policies)
))
if self.dbconfig.get(, self.ns, False) and self.manage_roles:
for policy in aws_role_inline_policies:
iam.delete_role_policy(RoleName=role_name, PolicyName=policy)
auditlog(
event=,
actor=self.ns,
data={
: account.account_name,
: role_name,
: policy
}
)
if missing_policies:
self.log.info(.format(
role_name,
account.account_name,
.join(missing_policies)
))
if self.manage_roles:
for policy in missing_policies:
iam.attach_role_policy(RoleName=role_name, PolicyArn=aws_policies[policy]['Arn'])
auditlog(
event=,
actor=self.ns,
data={
: account.account_name,
: role_name,
: aws_policies[policy][]
}
)
if extra_policies:
self.log.info(.format(
role_name,
account.account_name,
.join(extra_policies)
))
for policy in extra_policies:
if policy in aws_policies:
polArn = aws_policies[policy]['Arn']
elif policy in self.aws_managed_policies:
polArn = self.aws_managed_policies[policy]['Arn']
else:
polArn = None
self.log.info(.format(
role_name,
account.account_name,
policy
))
if self.manage_roles and polArn:
iam.detach_role_policy(RoleName=role_name, PolicyArn=polArn)
auditlog(
event=,
actor=self.ns,
data={
: account.account_name,
: role_name,
: polArn
}
) | Iterate through the roles of a specific account and create or update the roles if they're missing or
does not match the roles from Git.
Args:
account (:obj:`Account`): The account to check roles on
aws_policies (:obj:`dict` of `str`: `dict`): A dictionary containing all the policies for the specific
account
aws_roles (:obj:`dict` of `str`: `dict`): A dictionary containing all the roles for the specific account
Returns:
`None` |
1,940 | def updateCurrentValue(self, value):
xsnap = None
ysnap = None
if value != self.endValue():
xsnap = self.targetObject().isXSnappedToGrid()
ysnap = self.targetObject().isYSnappedToGrid()
self.targetObject().setXSnapToGrid(False)
self.targetObject().setYSnapToGrid(False)
super(XNodeAnimation, self).updateCurrentValue(value)
if value != self.endValue():
self.targetObject().setXSnapToGrid(xsnap)
self.targetObject().setYSnapToGrid(ysnap) | Disables snapping during the current value update to ensure a smooth
transition for node animations. Since this can only be called via
code, we don't need to worry about snapping to the grid for a user. |
1,941 | def min_rank(series, ascending=True):
ranks = series.rank(method='min', ascending=ascending)
return ranks | Equivalent to `series.rank(method='min', ascending=ascending)`.
Args:
series: column to rank.
Kwargs:
ascending (bool): whether to rank in ascending order (default is `True`). |
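A small illustration of the helper above on a pandas Series; ties share the minimum rank:
>>> import pandas as pd
>>> min_rank(pd.Series([30, 10, 20, 10]))
0    4.0
1    1.0
2    3.0
3    1.0
dtype: float64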
1,942 | def parse_sidebar(self, user_page):
user_info = {}
except:
if not self.session.suppress_parse_exceptions:
raise
return user_info | Parses the DOM and returns user attributes in the sidebar.
:type user_page: :class:`bs4.BeautifulSoup`
:param user_page: MAL user page's DOM
:rtype: dict
:return: User attributes
:raises: :class:`.InvalidUserError`, :class:`.MalformedUserPageError` |
1,943 | def process_request(self, request, client_address):
self.finish_request(request, client_address)
self.shutdown_request(request) | Call finish_request. |
1,944 | def get_start_and_end_time(self, ref=None):
now = time.localtime(ref)
if self.syear == 0:
self.syear = now.tm_year
day_start = find_day_by_weekday_offset(self.syear, self.smon, self.swday, self.swday_offset)
start_time = get_start_of_day(self.syear, self.smon, day_start)
if self.eyear == 0:
self.eyear = now.tm_year
day_end = find_day_by_weekday_offset(self.eyear, self.emon, self.ewday, self.ewday_offset)
end_time = get_end_of_day(self.eyear, self.emon, day_end)
now_epoch = time.mktime(now)
if start_time > end_time:
if now_epoch > end_time:
day_end = find_day_by_weekday_offset(self.eyear + 1,
self.emon, self.ewday, self.ewday_offset)
end_time = get_end_of_day(self.eyear + 1, self.emon, day_end)
else:
day_start = find_day_by_weekday_offset(self.syear - 1,
self.smon, self.swday, self.swday_offset)
start_time = get_start_of_day(self.syear - 1, self.smon, day_start)
else:
if now_epoch > end_time:
day_start = find_day_by_weekday_offset(self.syear + 1,
self.smon, self.swday, self.swday_offset)
start_time = get_start_of_day(self.syear + 1, self.smon, day_start)
day_end = find_day_by_weekday_offset(self.eyear + 1,
self.emon, self.ewday, self.ewday_offset)
end_time = get_end_of_day(self.eyear + 1, self.emon, day_end)
return (start_time, end_time) | Specific function to get start time and end time for MonthWeekDayDaterange
:param ref: time in seconds
:type ref: int | None
:return: tuple with start and end time
:rtype: tuple |
1,945 | def cmd(send, msg, args):
args[].query(Permissions).update({"registered": False})
args[].get_admins()
send("Verified admins reset.") | Clears the verified admin list
Syntax: {command} |
1,946 | def get_context(self, url, expiration):
self._feed = self.get(url, expiration)
return {
self.feed_context_name: self.format_feed_content(self._feed),
} | Build template context with formatted feed content |
1,947 | def gdaldem_mem_ma(ma, ds=None, res=None, extent=None, srs=None, processing=, returnma=False, computeEdges=False):
if ds is None:
ds = mem_ds(res, extent, srs=None, dtype=gdal.GDT_Float32)
else:
ds = mem_ds_copy(ds)
b = ds.GetRasterBand(1)
b.WriteArray(ma)
out = gdaldem_mem_ds(ds, processing=processing, returnma=returnma)
return out | Wrapper to allow gdaldem calculations for arbitrary NumPy masked array input
Untested, work in progress placeholder
Should only need to specify res, can calculate local gt, cartesian srs |
1,948 | def check_file(self, filename):
can_read = super(SecuredConfig, self).check_file(filename)
if not can_read:
return False
mode = get_stat(filename).st_mode
if (mode & stat.S_IRGRP) or (mode & stat.S_IROTH):
msg = "File %r is not secure enough. Change it's mode to 600"
self._log.warning(msg, filename)
return False
return True | Overrides :py:meth:`.Config.check_file` |
1,949 | def get_subnets_count(context, filters=None):
LOG.info("get_subnets_count for tenant %s with filters %s" %
(context.tenant_id, filters))
return db_api.subnet_count_all(context, **filters) | Return the number of subnets.
The result depends on the identity of the user making the request
(as indicated by the context) as well as any filters.
: param context: neutron api request context
: param filters: a dictionary with keys that are valid keys for
a network as listed in the RESOURCE_ATTRIBUTE_MAP object
in neutron/api/v2/attributes.py. Values in this dictiontary
are an iterable containing values that will be used for an exact
match comparison for that value. Each result returned by this
function will have matched one of the values for each key in
filters.
NOTE: this method is optional, as it was not part of the originally
defined plugin API. |
1,950 | def fix_repeat_dt(dt_list, offset_s=0.001):
idx = (np.diff(dt_list) == timedelta(0))
while np.any(idx):
dt_list[idx.nonzero()[0] + 1] += timedelta(seconds=offset_s)
idx = (np.diff(dt_list) == timedelta(0))
return dt_list | Add some small offset to remove duplicate times
Needed for xarray interp, which expects monotonically increasing times |
1,951 | def run_timeit(self, stmt, setup):
_timer = timeit.Timer(stmt=stmt, setup=setup)
trials = _timer.repeat(self.timeit_repeat, self.timeit_number)
self.time_average_seconds = sum(trials) / len(trials) / self.timeit_number
time_avg = convert_time_units(self.time_average_seconds)
return time_avg | Create the function call statement as a string used for timeit. |
1,952 | def password_get(username=None):
password = keyring.get_password(, username)
if password is None:
split_username = tuple(username.split())
msg = ("Couldnascii') | Retrieves a password from the keychain based on the environment and
configuration parameter pair.
If this fails, None is returned. |
1,953 | def status_line(self):
date = self.date_published
status = self.state.title()
if self.state == self.DRAFT:
status = "Draft saved"
date = self.last_save
if date and self.last_save == self.last_scheduled:
if self.v_last_save:
if self.last_scheduled >= self.v_last_save:
status = self.PUBLISHED.title()
if self.last_scheduled > self.v_last_save:
status = "Publish Scheduled"
else:
status = "Publish Scheduled"
date = self.date_published
if date:
status = "%s: %s" % (status, formats.date_format(date, "SHORT_DATE_FORMAT"))
return status | Returns a status line for an item.
Only really interesting when called for a draft
item as it can tell you if the draft is the same as
another version. |
1,954 | def autoparal_run(self):
policy = self.manager.policy
if policy.autoparal == 0:
logger.info("Nothing to do in autoparal, returning (None, None)")
return 0
if policy.autoparal != 1:
raise NotImplementedError("autoparal != 1")
max_ncpus = self.manager.max_cores
if max_ncpus == 1: return 0
autoparal_vars = dict(autoparal=policy.autoparal, max_ncpus=max_ncpus, mem_test=0)
self.set_vars(autoparal_vars)
time.sleep(5)
try:
pconfs = parser.parse(self.output_file.path)
except parser.Error:
logger.critical("Error while parsing Autoparal section:\n%s" % straceback())
return 2
optconf = self.find_optconf(pconfs)
self.set_vars(optconf.vars)
d = pconfs.as_dict()
d["optimal_conf"] = optconf
json_pretty_dump(d, os.path.join(self.workdir, "autoparal.json"))
self.set_status(self.S_INIT, msg=)
os.remove(self.output_file.path)
os.remove(self.log_file.path)
os.remove(self.stderr_file.path)
return 0 | Find an optimal set of parameters for the execution of the task
This method can change the ABINIT input variables and/or the
submission parameters e.g. the number of CPUs for MPI and OpenMp.
Set:
self.pconfs where pconfs is a :class:`ParalHints` object with the configuration reported by
autoparal and optimal is the optimal configuration selected.
Returns 0 if success |
1,955 | def from_pubkey(cls: Type[CRCPubkeyType], pubkey: str) -> CRCPubkeyType:
hash_root = hashlib.sha256()
hash_root.update(base58.b58decode(pubkey))
hash_squared = hashlib.sha256()
hash_squared.update(hash_root.digest())
b58_checksum = ensure_str(base58.b58encode(hash_squared.digest()))
crc = b58_checksum[:3]
return cls(pubkey, crc) | Return CRCPubkey instance from public key string
:param pubkey: Public key
:return: |
1,956 | def get_output(src):
output = ''
lines = open(src.path, ).readlines()
for line in lines:
m = re.match(config.import_regex,line)
if m:
include_path = os.path.abspath(src.dir + + m.group());
if include_path not in config.sources:
script = Script(include_path)
script.parents.append(src)
config.sources[script.path] = script
include_file = config.sources[include_path]
if include_file not in config.stack or m.group() == :
config.stack.append(include_file)
output += get_output(include_file)
else:
output += line
return output | parse lines looking for commands |
1,957 | def do_transition_for(brain_or_object, transition):
if not isinstance(transition, basestring):
fail("Transition type needs to be string, got " % type(transition))
obj = get_object(brain_or_object)
ploneapi.content.transition(obj, transition)
return obj | Performs a workflow transition for the passed in object.
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: The object where the transtion was performed |
1,958 | def as_string(value):
if six.PY2:
buffer_types = buffer, memoryview
else:
buffer_types = memoryview
if value is None:
return u''
elif isinstance(value, buffer_types):
return bytes(value).decode('utf-8', 'ignore')
elif isinstance(value, bytes):
return value.decode('utf-8', 'ignore')
else:
return six.text_type(value) | Convert a value to a Unicode object for matching with a query.
None becomes the empty string. Bytestrings are silently decoded. |
1,959 | def expand_cause_repertoire(self, new_purview=None):
return self.subsystem.expand_cause_repertoire(
self.cause.repertoire, new_purview) | See |Subsystem.expand_repertoire()|. |
1,960 | def visibleCount(self):
return sum(int(not self.item(i).isHidden()) for i in range(self.count())) | Returns the number of visible items in this list.
:return <int> |
1,961 | def fit_predict(self, y, exogenous=None, n_periods=10, **fit_args):
self.fit(y, exogenous, **fit_args)
return self.predict(n_periods=n_periods, exogenous=exogenous) | Fit an ARIMA to a vector, ``y``, of observations with an
optional matrix of ``exogenous`` variables, and then generate
predictions.
Parameters
----------
y : array-like or iterable, shape=(n_samples,)
The time-series to which to fit the ``ARIMA`` estimator. This may
either be a Pandas ``Series`` object (statsmodels can internally
use the dates in the index), or a numpy array. This should be a
one-dimensional array of floats, and should not contain any
``np.nan`` or ``np.inf`` values.
exogenous : array-like, shape=[n_obs, n_vars], optional (default=None)
An optional 2-d array of exogenous variables. If provided, these
variables are used as additional features in the regression
operation. This should not include a constant or trend. Note that
if an ``ARIMA`` is fit on exogenous features, it must be provided
exogenous features for making predictions.
n_periods : int, optional (default=10)
The number of periods in the future to forecast.
fit_args : dict or kwargs, optional (default=None)
Any keyword args to pass to the fit method. |
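A rough usage sketch for fit_predict; the constructor arguments and data below are illustrative assumptions, not taken from the entry itself:
>>> import numpy as np
>>> y = np.array([10.2, 11.1, 10.8, 11.5, 12.0, 12.3, 12.9, 13.1])
>>> model = ARIMA(order=(1, 1, 0))  # hypothetical order
>>> forecast = model.fit_predict(y, n_periods=3)
>>> forecast.shape
(3,)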
1,962 | def REV(self, params):
Ra, Rb = self.get_two_parameters(self.TWO_PARAMETER_COMMA_SEPARATED, params)
self.check_arguments(low_registers=(Ra, Rb))
def REV_func():
self.register[Ra] = ((self.register[Rb] & 0xFF000000) >> 24) | \
((self.register[Rb] & 0x00FF0000) >> 8) | \
((self.register[Rb] & 0x0000FF00) << 8) | \
((self.register[Rb] & 0x000000FF) << 24)
return REV_func | REV Ra, Rb
Reverse the byte order in register Rb and store the result in Ra |
1,963 | def get_schema_input_format(self, **kwargs):
config = ET.Element("config")
get_schema = ET.Element("get_schema")
config = get_schema
input = ET.SubElement(get_schema, "input")
format = ET.SubElement(input, "format")
format.text = kwargs.pop('format')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
1,964 | def execute(self, points, *args, **kwargs):
if isinstance(self.model, OrdinaryKriging) or \
isinstance(self.model, OrdinaryKriging3D):
prediction, variance = \
self.model.execute('points',
n_closest_points=self.n_closest_points,
backend='loop',
**points)
else:
print()
prediction, variance = \
self.model.execute('points', backend='loop', **points)
return prediction, variance | Parameters
----------
points: dict
Returns:
-------
Prediction array
Variance array |
1,965 | def call_ck(i):
import subprocess
import re
action=i.get(,)
if action==:
return {:1, :}
if not re.match(, action):
return {:1, :}
fd, fn=tempfile.mkstemp(suffix=, prefix=)
os.close(fd)
dc=i.get(,)
if dc==: i[]=
rr={:0}
rr[]=
rr[]=
rx=ck.save_json_to_file({:fn, :i})
if rx[]>0: return rx
cmd=+action++fn
if dc==:
rx=ck.get_os_ck({})
if rx[]>0: return rx
plat=rx[]
dci=ck.cfg.get(,{}).get(plat,{})
dcmd=dci.get(,)
if dcmd==:
return {:1, :}
dcmd=dcmd.replace(, cmd)
if dci.get(,)==:
process=subprocess.Popen(dcmd, stdin=None, stdout=None, stderr=None, shell=True, close_fds=True, creationflags=subprocess.CREATE_NEW_CONSOLE)
else:
try:
pid=os.fork()
except OSError as e:
return {:1, :+format(e)+}
if pid==0:
os.setsid()
pid=os.fork()
if pid!=0: os._exit(0)
try:
maxfd=os.sysconf("SC_OPEN_MAX")
except (AttributeError, ValueError):
maxfd=1024
for fd in range(maxfd):
try:
os.close(fd)
except OSError:
pass
os.open('/dev/null', os.O_RDWR)
os.dup2(0, 1)
os.dup2(0, 2)
process=os.system(dcmd)
os._exit(0)
stdout=ck.cfg.get(, )
stderr=
else:
process=subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stdout,stderr=process.communicate()
try: stdout=stdout.decode()
except Exception as e: pass
try: stderr=stderr.decode()
except Exception as e: pass
rr[]=stdout+stderr
rr[]=stdout
rr[]=stderr
return rr | Input: {
Input for CK
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
(stdout) - stdout, if available
(stderr) - stderr, if available
(std) - stdout+stderr
} |
1,966 | def instanceStarted(self, *args, **kwargs):
return self._makeApiCall(self.funcinfo["instanceStarted"], *args, **kwargs) | Report an instance starting
An instance will report in by giving its instance id as well
as its security token. The token is given and checked to ensure
that it matches a real token that exists to ensure that random
machines do not check in. We could generate a different token
but that seems like overkill
This method is ``stable`` |
1,967 | def instancelist(obj_list, check=False, shared_attrs=None):
class InstanceList_(object):
def __init__(self, obj_list, shared_attrs=None):
self._obj_list = []
self._shared_public_attrs = []
self._example_type = None
if len(obj_list) > 0:
import utool as ut
self._obj_list = obj_list
example_obj = obj_list[0]
example_type = type(example_obj)
self._example_type = example_type
if shared_attrs is None:
if check:
attrsgen = [set(dir(obj)) for obj in obj_list]
shared_attrs = list(reduce(set.intersection, attrsgen))
else:
shared_attrs = dir(example_obj)
allowed = []
self._shared_public_attrs = [
a for a in shared_attrs
if a in allowed or not a.startswith('_')
]
for attrname in self._shared_public_attrs:
attrtype = getattr(example_type, attrname, None)
if attrtype is not None and isinstance(attrtype, property):
setattr(InstanceList_, attrname,
property(self._define_prop(attrname)))
else:
func = self._define_func(attrname)
ut.inject_func_as_method(self, func, attrname)
def __nice__(self):
if self._example_type is None:
typename =
else:
typename = self._example_type.__name__
return % (len(self._obj_list), typename)
def __repr__(self):
classname = self.__class__.__name__
devnice = self.__nice__()
return % (classname, devnice, hex(id(self)))
def __str__(self):
classname = self.__class__.__name__
devnice = self.__nice__()
return % (classname, devnice)
def __getitem__(self, key):
return self._map_method('__getitem__', key)
def _define_func(self, attrname):
import utool as ut
def _wrapper(self, *args, **kwargs):
return self._map_method(attrname, *args, **kwargs)
ut.set_funcname(_wrapper, attrname)
return _wrapper
def _map_method(self, attrname, *args, **kwargs):
mapped_vals = [getattr(obj, attrname)(*args, **kwargs)
for obj in self._obj_list]
return mapped_vals
def _define_prop(self, attrname):
import utool as ut
def _getter(self):
return self._map_property(attrname)
ut.set_funcname(_getter, + attrname)
return _getter
def _map_property(self, attrname):
mapped_vals = [getattr(obj, attrname) for obj in self._obj_list]
return mapped_vals
return InstanceList_(obj_list, shared_attrs) | Executes methods and attribute calls on a list of objects of the same type
Bundles a list of object of the same type into a single object.
The new object contains the same functions as each original object
but applies them to each element of the list independantly when called.
CommandLine:
python -m utool.util_dev instancelist
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dev import * # NOQA
>>> import utool as ut
>>> obj_list = ['hi', 'bye', 'foo']
>>> self = ut.instancelist(obj_list, check=False)
>>> print(self)
>>> print(self.upper())
>>> print(self.isalpha()) |
1,968 | def _head(self, uri):
resp, resp_body = self.api.method_head(uri)
return resp | Handles the communication with the API when performing a HEAD request
on a specific resource managed by this class. Returns the headers
contained in the response. |
1,969 | def _get_and_assert_slice_param(url_dict, param_name, default_int):
param_str = url_dict[].get(param_name, default_int)
try:
n = int(param_str)
except ValueError:
raise d1_common.types.exceptions.InvalidRequest(
0,
.format(
param_name, param_str
),
)
if n < 0:
raise d1_common.types.exceptions.InvalidRequest(
0,
.format(
param_name, param_str
),
)
return n | Return ``param_str`` converted to an int.
If str cannot be converted to int or int is not zero or positive, raise
InvalidRequest. |
1,970 | def inputs(ctx, client, revision, paths):
from renku.models.provenance import ProcessRun
graph = Graph(client)
paths = set(paths)
nodes = graph.build(revision=revision)
commits = {node.commit for node in nodes}
candidates = {(node.commit, node.path)
for node in nodes if not paths or node.path in paths}
input_paths = set()
for commit in commits:
activity = graph.activities[commit]
if isinstance(activity, ProcessRun):
for usage in activity.qualified_usage:
for entity in usage.entity.entities:
path = str((usage.client.path / entity.path).relative_to(
client.path
))
usage_key = (entity.commit, entity.path)
if path not in input_paths and usage_key in candidates:
input_paths.add(path)
click.echo('\n'.join(graph._format_path(path) for path in input_paths))
ctx.exit(0 if not paths or len(input_paths) == len(paths) else 1) | r"""Show input files in the repository.
<PATHS> Files to show. If no files are given all input files are shown. |
1,971 | def _expon_solve_lam_from_mu(mu, b):
def lam_eq(lam, mu, b):
lam, mu, b = Decimal(lam), Decimal(mu), Decimal(b)
return ( (1 - (lam*b + 1) * np.exp(-lam*b)) /
(lam - lam * np.exp(-lam*b) + Decimal(1e-32)) - mu )
return optim.brentq(lam_eq, -100, 100, args=(mu, b), disp=True) | For the expon_uptrunc, given mu and b, return lam.
Similar to geom_uptrunc |
1,972 | def GaussianLogDensity(x, mu, log_var, name=, EPSILON = 1e-6):
c = mx.sym.ones_like(log_var)*2.0 * 3.1416
c = mx.symbol.log(c)
var = mx.sym.exp(log_var)
x_mu2 = mx.symbol.square(x - mu)
x_mu2_over_var = mx.symbol.broadcast_div(x_mu2, var + EPSILON)
log_prob = -0.5 * (c + log_var + x_mu2_over_var)
log_prob = mx.symbol.sum(log_prob, axis=1, name=name)
return log_prob | GaussianLogDensity loss calculation for layer wise loss |
1,973 | def size(self):
if self._size is None:
self._size = 0
for csv_file in self.files:
self._size += sum(1 if line else 0 for line in _util.open_local_or_gcs(csv_file, 'r'))
return self._size | The size of the schema. If the underlying data source changes, it may be outdated. |
1,974 | def reflectance(self, band):
if band == 6:
raise ValueError()
rad = self.radiance(band)
esun = self.ex_atm_irrad[band - 1]
toa_reflect = (pi * rad * self.earth_sun_dist ** 2) / (esun * cos(self.solar_zenith_rad))
return toa_reflect | :param band: An optical band, i.e. 1-5, 7
:return: At satellite reflectance, [-] |
1,975 | def _scrollView( self, value ):
if self._scrolling:
return
view_bar = self.uiGanttVIEW.verticalScrollBar()
self._scrolling = True
view_bar.setValue(value)
self._scrolling = False | Updates the gantt view scrolling to the inputed value.
:param value | <int> |
1,976 | def _init_db(self):
with self._get_db() as db:
with open(self.schemapath) as f:
db.cursor().executescript(f.read())
db.commit() | Creates the database tables. |
1,977 | def timestampFormat(self, timestampFormat):
if not isinstance(timestampFormat, str):
raise TypeError()
self._timestampFormat = timestampFormat | Setter to _timestampFormat. Formatting string for conversion of timestamps to QtCore.QDateTime
Raises:
TypeError: if timestampFormat is not of type str.
Args:
timestampFormat (unicode): assign timestampFormat to _timestampFormat.
Formatting string for conversion of timestamps to QtCore.QDateTime. Used in data method. |
1,978 | def WriteSignedBinaryBlobs(binary_urn,
blobs,
token = None):
if _ShouldUseLegacyDatastore():
aff4.FACTORY.Delete(binary_urn, token=token)
with data_store.DB.GetMutationPool() as mutation_pool:
with aff4.FACTORY.Create(
binary_urn,
collects.GRRSignedBlob,
mode="w",
mutation_pool=mutation_pool,
token=token) as fd:
for blob in blobs:
fd.Add(blob, mutation_pool=mutation_pool)
if data_store.RelationalDBEnabled():
blob_references = rdf_objects.BlobReferences()
current_offset = 0
for blob in blobs:
blob_id = data_store.BLOBS.WriteBlobWithUnknownHash(
blob.SerializeToString())
blob_references.items.Append(
rdf_objects.BlobReference(
offset=current_offset, size=len(blob.data), blob_id=blob_id))
current_offset += len(blob.data)
data_store.REL_DB.WriteSignedBinaryReferences(
_SignedBinaryIDFromURN(binary_urn), blob_references) | Saves signed blobs to the datastore.
If a signed binary with the given URN already exists, its contents will get
overwritten.
Args:
binary_urn: RDFURN that should serve as a unique identifier for the binary.
blobs: An Iterable of signed blobs to write to the datastore.
token: ACL token to use with the legacy (non-relational) datastore. |
1,979 | def list_to_string(input, delimiter):
if isinstance(input, list):
return delimiter.join(
list_to_string(item, delimiter) for item in input)
return input | converts list to string recursively so that nested lists are supported
:param input: a list of strings and lists of strings (and so on recursive)
:type input: list
:param delimiter: the deimiter to use when joining the items
:type delimiter: str
:returns: the recursively joined list
:rtype: str |
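For example, nested lists are flattened recursively before joining:
>>> list_to_string(["a", ["b", "c"], "d"], ", ")
'a, b, c, d'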
1,980 | def get_axes(process_or_domain):
if isinstance(process_or_domain, Process):
dom = process_or_domain.domains
else:
dom = process_or_domain
if isinstance(dom, _Domain):
return dom.axes
elif isinstance(dom, dict):
axes = {}
for thisdom in list(dom.values()):
assert isinstance(thisdom, _Domain)
axes.update(thisdom.axes)
return axes
else:
raise TypeError() | Returns a dictionary of all Axis in a domain or dictionary of domains.
:param process_or_domain: a process or a domain object
:type process_or_domain: :class:`~climlab.process.process.Process` or
:class:`~climlab.domain.domain._Domain`
:raises: :exc: `TypeError` if input is not or not having a domain
:returns: dictionary of input's Axis
:rtype: dict
:Example:
::
>>> import climlab
>>> from climlab.process.process import get_axes
>>> model = climlab.EBM()
>>> get_axes(model)
{'lat': <climlab.domain.axis.Axis object at 0x7ff13b9dd2d0>,
'depth': <climlab.domain.axis.Axis object at 0x7ff13b9dd310>} |
1,981 | def findall(obj, prs, forced_type=None,
cls=anyconfig.models.processor.Processor):
if (obj is None or not obj) and forced_type is None:
raise ValueError("The first argument or the second argument "
" must be something other than "
"None or False.")
if forced_type is None:
pclss = find_by_maybe_file(obj, prs)
else:
pclss = find_by_type_or_id(forced_type, prs)
return pclss | :param obj:
a file path, file, file-like object, pathlib.Path object or an
'anyconfig.globals.IOInfo` (namedtuple) object
:param prs: A list of :class:`anyconfig.models.processor.Processor` classes
:param forced_type:
Forced processor type of the data to process or ID of the processor
class or None
:param cls: A class object to compare with 'forced_type' later
:return: A list of instances of processor classes to process 'obj' data
:raises: ValueError, UnknownProcessorTypeError, UnknownFileTypeError |
1,982 | def find_file(folder, filename):
matches = []
if os.path.isabs(filename) and os.path.isfile(filename):
return filename
for root, _, filenames in os.walk(folder):
for fn in fnmatch.filter(filenames, filename):
matches.append(os.path.join(root, fn))
if not matches:
raise IOError('File %s not found' % filename)
return matches[-1] | Find a file given a folder and filename. If the filename can be
resolved directly it is returned; otherwise the supplied folder is walked and the last match is returned. |
1,983 | def define_snowflake_config():
account = Field(
String,
description=,
is_optional=True,
)
user = Field(String, description=, is_optional=False)
password = Field(String, description=, is_optional=False)
database = Field(
String,
description=,
is_optional=True,
)
schema = Field(
String,
description=,
is_optional=True,
)
role = Field(
String,
description=,
is_optional=True,
)
warehouse = Field(
String,
description=,
is_optional=True,
)
autocommit = Field(
Bool,
description=,
is_optional=True,
)
client_prefetch_threads = Field(
Int,
description=,
is_optional=True,
)
client_session_keep_alive = Field(
String,
description=,
is_optional=True,
)
login_timeout = Field(
Int,
description=,
is_optional=True,
)
network_timeout = Field(
Int,
description=,
is_optional=True,
)
ocsp_response_cache_filename = Field(
Path,
description=,
is_optional=True,
)
validate_default_parameters = Field(
Bool,
description=t exists if True.pyformat by default for client side binding. Specify qmark or numeric to
change bind variable formats for server side binding.None by default, which honors the Snowflake parameter TIMEZONE. Set to a
valid time zone (e.g. America/Los_Angeles) to set the session time zone.accountuserpassworddatabaseschemarolewarehouseautocommitclient_prefetch_threadsclient_session_keep_alivelogin_timeoutnetwork_timeoutocsp_response_cache_filenamevalidate_default_parametersparamstyletimezoneSnowflake configuration',
) | Snowflake configuration.
See the Snowflake documentation for reference:
https://docs.snowflake.net/manuals/user-guide/python-connector-api.html |
1,984 | def stack_decoders(self, *layers):
self.stack(*layers)
self.decoding_layers.extend(layers) | Stack decoding layers. |
1,985 | def parse(text: str) -> Docstring:
ret = Docstring()
if not text:
return ret
text = inspect.cleandoc(text)
match = _titles_re.search(text)
if match:
desc_chunk = text[: match.start()]
meta_chunk = text[match.start() :]
else:
desc_chunk = text
meta_chunk = ""
parts = desc_chunk.split("\n", 1)
ret.short_description = parts[0] or None
if len(parts) > 1:
long_desc_chunk = parts[1] or ""
ret.blank_after_short_description = long_desc_chunk.startswith("\n")
ret.blank_after_long_description = long_desc_chunk.endswith("\n\n")
ret.long_description = long_desc_chunk.strip() or None
matches = list(_titles_re.finditer(meta_chunk))
if not matches:
return ret
splits = []
for j in range(len(matches) - 1):
splits.append((matches[j].end(), matches[j + 1].start()))
splits.append((matches[-1].end(), len(meta_chunk)))
chunks = {}
for j, (start, end) in enumerate(splits):
title = matches[j].group(1)
if title not in _valid:
continue
chunks[title] = meta_chunk[start:end].strip("\n")
if not chunks:
return ret
for title, chunk in chunks.items():
indent_match = re.search(r"^\s+", chunk)
if not indent_match:
            raise ParseError(f'Can\'t infer indent from "{chunk}"')
        indent = indent_match.group()
        # Split on lines that have exactly that indent.
        _re = "^" + indent + r"(?=\S)"
        c_matches = list(re.finditer(_re, chunk, flags=re.M))
        if not c_matches:
            raise ParseError(f'No specification for "{title}": "{chunk}"')
c_splits = []
for j in range(len(c_matches) - 1):
c_splits.append((c_matches[j].end(), c_matches[j + 1].start()))
c_splits.append((c_matches[-1].end(), len(chunk)))
for j, (start, end) in enumerate(c_splits):
part = chunk[start:end].strip("\n")
ret.meta.append(_build_meta(part, title))
return ret | Parse the Google-style docstring into its components.
:returns: parsed docstring |
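A hedged usage sketch, assuming the parse function above (a Google-style docstring parser) is importable; the attribute names match the Docstring fields used in the code.

# Usage sketch; the example docstring is made up.
doc = parse(
    'Add two numbers.\n'
    '\n'
    'Args:\n'
    '    a: first operand\n'
    '    b: second operand\n'
)
print(doc.short_description)   # 'Add two numbers.'
print(len(doc.meta))           # one meta entry per documented argument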
1,986 | def from_pandas(df, value='value', x='x', y='y', cellx=None, celly=None, xmin=None, ymax=None,
geot=None, nodata_value=None, projection=None, datatype=None):
if not cellx:
cellx = (df.sort_values(x)[x]-df.sort_values(x).shift(1)[x]).max()
if not celly:
celly = (df.sort_values(y, ascending=False)[y]-df.sort_values(y, ascending=False).shift(1)[y]).drop_duplicates().replace(0).max()
if not xmin:
xmin = df[x].min()
if not ymax:
ymax = df[y].max()
row, col = map_pixel(df[x], df[y], cellx, celly, xmin, ymax)
    dfout = pd.DataFrame(np.array([row, col, df[value]]).T, columns=['row', 'col', 'value'])
    dfout = dfout.set_index(["row","col"]).unstack().value.reindex(index=np.arange(0,np.max(row)+1)).T.reindex(index=np.arange(0,np.max(col)+1)).T
if nodata_value:
dfout[np.isnan(dfout)] = nodata_value
if not nodata_value:
nodata_value = np.nan
if not geot:
geot = (xmin, cellx, 0, ymax, 0, celly)
return GeoRaster(dfout, geot, nodata_value=nodata_value, projection=projection, datatype=datatype) | Creates a GeoRaster from a Pandas DataFrame. Useful to plot or export data to rasters.
Usage:
raster = from_pandas(df, value='value', x='x', y='y', cellx= cellx, celly=celly,
xmin=xmin, ymax=ymax, geot=geot, nodata_value=ndv,
projection=projection, datatype=datatype)
Although it does not require all the inputs, it is highly recommended to include
the geographical information, so that the GeoRaster is properly constructed. As usual,
the information can be added afterwards directly to the GeoRaster. |
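A hedged usage sketch along the lines of the Usage block above; the grid values and cell sizes are made up, and the commented call assumes the georasters package that supplies from_pandas and GeoRaster.

import numpy as np
import pandas as pd

# A tiny 3x3 grid flattened into x/y/value columns (made-up data).
xs, ys = np.meshgrid(np.arange(0.0, 3.0), np.arange(2.0, -1.0, -1.0))
df = pd.DataFrame({'x': xs.ravel(), 'y': ys.ravel(),
                   'value': np.arange(9, dtype=float)})

# Requires the georasters package that defines from_pandas/GeoRaster above:
# raster = from_pandas(df, value='value', x='x', y='y',
#                      cellx=1.0, celly=-1.0, xmin=0.0, ymax=2.0)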
1,987 | def rget(d, key):
if not isinstance(d, dict):
return None
assert isinstance(key, str) or isinstance(key, list)
    keys = key.split('.') if isinstance(key, str) else key
cdrs = cdr(keys)
cars = car(keys)
return rget(d.get(cars), cdrs) if cdrs else d.get(cars) | Recursively get keys from dict, for example:
    'a.b.c' --> d['a']['b']['c']; returns None if the key does not exist. |
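A self-contained sketch of the recursive lookup, with hypothetical car/cdr helpers (the originals are not shown here) and the dotted-key split taken from the docstring.

def car(lst):
    # First element of the key path.
    return lst[0] if lst else None

def cdr(lst):
    # Remaining elements of the key path.
    return lst[1:]

def rget_sketch(d, key):
    if not isinstance(d, dict):
        return None
    keys = key.split('.') if isinstance(key, str) else key
    head, rest = car(keys), cdr(keys)
    return rget_sketch(d.get(head), rest) if rest else d.get(head)

print(rget_sketch({'a': {'b': {'c': 1}}}, 'a.b.c'))  # -> 1
print(rget_sketch({'a': {'b': {'c': 1}}}, 'a.x.c'))  # -> None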
1,988 | def make_app(config=None):
config = config or {}
app = CoolMagicApplication(config)
app = SharedDataMiddleware(
app, {"/public": path.join(path.dirname(__file__), "public")}
)
app = local_manager.make_middleware(app)
return app | Factory function that creates a new `CoolmagicApplication`
object. Optional WSGI middlewares should be applied here. |
1,989 | def verify_tree_consistency(self, old_tree_size: int, new_tree_size: int,
old_root: bytes, new_root: bytes,
proof: Sequence[bytes]):
old_size = old_tree_size
new_size = new_tree_size
if old_size < 0 or new_size < 0:
raise ValueError("Negative tree size")
if old_size > new_size:
raise ValueError("Older tree has bigger size (%d vs %d), did "
"you supply inputs in the wrong order?" %
(old_size, new_size))
if old_size == new_size:
if old_root == new_root:
if proof:
logging.debug("Trees are identical, ignoring proof")
return True
else:
raise error.ConsistencyError("Inconsistency: different root "
"hashes for the same tree size")
if old_size == 0:
if proof:
logging.debug("Ignoring non-empty consistency proof for "
"empty tree.")
return True
node = old_size - 1
last_node = new_size - 1
while node % 2:
node //= 2
last_node //= 2
p = iter(proof)
try:
if node:
new_hash = old_hash = next(p)
else:
new_hash = old_hash = old_root
while node:
if node % 2:
next_node = next(p)
old_hash = self.hasher.hash_children(next_node, old_hash)
new_hash = self.hasher.hash_children(next_node, new_hash)
elif node < last_node:
new_hash = self.hasher.hash_children(new_hash, next(p))
node //= 2
last_node //= 2
while last_node:
n = next(p)
new_hash = self.hasher.hash_children(new_hash, n)
last_node //= 2
try:
next(p)
except StopIteration:
pass
else:
logging.debug("Proof has extra nodes")
return True | Verify the consistency between two root hashes.
old_tree_size must be <= new_tree_size.
Args:
old_tree_size: size of the older tree.
new_tree_size: size of the newer_tree.
old_root: the root hash of the older tree.
new_root: the root hash of the newer tree.
proof: the consistency proof.
Returns:
True. The return value is enforced by a decorator and need not be
checked by the caller.
Raises:
ConsistencyError: the proof indicates an inconsistency
(this is usually really serious!).
ProofError: the proof is invalid.
ValueError: supplied tree sizes are invalid. |
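The verifier only calls self.hasher.hash_children; below is a minimal sketch of an RFC 6962-style tree hasher that could fill that role, assuming SHA-256 and the usual 0x00/0x01 domain-separation prefixes (an assumption, not the original class).

import hashlib

class Sha256TreeHasher:
    """Merkle tree hashing in the RFC 6962 style (assumed; not the original hasher)."""

    def hash_empty(self):
        return hashlib.sha256(b'').digest()

    def hash_leaf(self, data):
        # Leaf hashes are domain-separated with a 0x00 prefix.
        return hashlib.sha256(b'\x00' + data).digest()

    def hash_children(self, left, right):
        # Interior nodes are domain-separated with a 0x01 prefix.
        return hashlib.sha256(b'\x01' + left + right).digest()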
1,990 | def get_firewall_rules(self, server):
server_uuid, server_instance = uuid_and_instance(server)
url = .format(server_uuid)
res = self.get_request(url)
return [
FirewallRule(server=server_instance, **firewall_rule)
for firewall_rule in res[][]
] | Return all FirewallRule objects based on a server instance or uuid. |
1,991 | def registerPolling(self, fd, options = POLLING_IN|POLLING_OUT, daemon = False):
self.polling.register(fd, options, daemon) | register a polling file descriptor
:param fd: file descriptor or socket object
:param options: bit mask flags. Polling object should ignore the incompatible flag. |
1,992 | def _add_docstring(format_dict):
def add_docstring_context(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
wrapper.__doc__ = func.__doc__.format(**format_dict)
return wrapper
return add_docstring_context | Format a doc-string on the fly.
@arg format_dict: A dictionary to format the doc-strings
Example:
@add_docstring({'context': __doc_string_context})
def predict(x):
'''
{context}
>> model.predict(data)
'''
return x |
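A short usage sketch for the decorator factory above; the format dictionary and the predict function here are made up for illustration.

_doc_context = {'context': 'Shared preamble describing the model inputs.'}

@_add_docstring(_doc_context)
def predict(x):
    """{context}

    >> model.predict(data)
    """
    return x

print(predict.__doc__)   # the {context} placeholder is filled in
print(predict(3))        # -> 3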
1,993 | def _execute(job, f, o=None):
out = ctypes.create_string_buffer(RS_JOB_BLOCKSIZE)
while True:
block = f.read(RS_JOB_BLOCKSIZE)
buff = Buffer()
buff.next_in = ctypes.c_char_p(block)
buff.avail_in = ctypes.c_size_t(len(block))
buff.eof_in = ctypes.c_int(not block)
buff.next_out = ctypes.cast(out, ctypes.c_char_p)
buff.avail_out = ctypes.c_size_t(RS_JOB_BLOCKSIZE)
r = _librsync.rs_job_iter(job, ctypes.byref(buff))
if o:
o.write(out.raw[:RS_JOB_BLOCKSIZE - buff.avail_out])
if r == RS_DONE:
break
elif r != RS_BLOCKED:
raise LibrsyncError(r)
if buff.avail_in > 0:
f.seek(f.tell() - buff.avail_in)
    if o and callable(getattr(o, 'seek', None)):
o.seek(0)
return o | Executes a librsync "job" by reading bytes from `f` and writing results to
`o` if provided. If `o` is omitted, the output is ignored. |
1,994 | def _complete_exit(self, cmd, args, text):
if args:
return
return [ x for x in { , , } \
if x.startswith(text) ] | Find candidates for the 'exit' command. |
1,995 | def pointwise_free_energies(self, therm_state=None):
        assert self.therm_energies is not None, \
            'Free energies must be estimated before pointwise free energies can be calculated.'
if therm_state is not None:
assert therm_state<=self.nthermo
mu = [_np.zeros(d.shape[0], dtype=_np.float64) for d in self.dtrajs+self.equilibrium_dtrajs]
if self.equilibrium is None:
_tram.get_pointwise_unbiased_free_energies(
therm_state,
self.log_lagrangian_mult, self.biased_conf_energies,
self.therm_energies, self.count_matrices,
self.btrajs, self.dtrajs,
self.state_counts, None, None, mu)
else:
_trammbar.get_pointwise_unbiased_free_energies(
therm_state,
self.log_lagrangian_mult, self.biased_conf_energies,
self.therm_energies, self.count_matrices,
self.btrajs+self.equilibrium_btrajs, self.dtrajs+self.equilibrium_dtrajs,
self.state_counts, None, None, mu,
equilibrium_therm_state_counts=self.equilibrium_state_counts.sum(axis=1).astype(_np.intc),
overcounting_factor=1.0/self.lag)
return mu | r"""
Computes the pointwise free energies :math:`-\log(\mu^k(x))` for all points x.
:math:`\mu^k(x)` is the optimal estimate of the Boltzmann distribution
of the k'th ensemble defined on the set of all samples.
Parameters
----------
therm_state : int or None, default=None
Selects the thermodynamic state k for which to compute the
pointwise free energies.
None selects the "unbiased" state which is defined by having
zero bias energy.
Returns
-------
mu_k : list of numpy.ndarray(X_i, dtype=numpy.float64)
list of the same layout as dtrajs (or ttrajs). mu_k[i][t]
contains the pointwise free energy of the frame seen in
trajectory i and time step t.
        Frames that are not in the connected sets get assigned an
infinite pointwise free energy. |
1,996 | def warning(f, *args, **kwargs):
    kwargs.update({'log': logging.WARNING})
return _stump(f, *args, **kwargs) | Automatically log progress on function entry and exit. Default logging
value: warning.
*Logging with values contained in the parameters of the decorated function*
Message (args[0]) may be a string to be formatted with parameters passed to
the decorated function. Each '{varname}' will be replaced by the value of
the parameter of the same name.
*Keyword parameters*
- log :: integer
- Specifies a custom level of logging to pass to the active logger.
- Default: WARNING
*Exceptions:*
- IndexError and ValueError
- will be returned if *args contains a string that does not correspond to
a parameter name of the decorated function, or if there are more '{}'s
than there are *args. |
1,997 | def get_mcu_definition(self, project_file):
project_file = join(getcwd(), project_file)
coproj_dic = xmltodict.parse(file(project_file), dict_constructor=dict)
mcu = MCU_TEMPLATE
IROM1_index = self._coproj_find_option(coproj_dic[][][][][][], , )
IROM2_index = self._coproj_find_option(coproj_dic[][][][][][], , )
IRAM1_index = self._coproj_find_option(coproj_dic[][][][][][], , )
IRAM2_index = self._coproj_find_option(coproj_dic[][][][][][], , )
defaultAlgorithm_index = self._coproj_find_option(coproj_dic[][][][], , )
mcu[] = {
: {
: {
: [coproj_dic[][][][]],
: [coproj_dic[][][][]],
: [coproj_dic[][][][]],
: [coproj_dic[][][][]],
},
: {
: [coproj_dic[][][][][defaultAlgorithm_index][]],
},
: {
: {
: [coproj_dic[][][][][][][IROM1_index][]],
: [coproj_dic[][][][][][][IROM1_index][]],
: [coproj_dic[][][][][][][IROM1_index][]],
: [coproj_dic[][][][][][][IROM1_index][]],
},
: {
: [coproj_dic[][][][][][][IRAM1_index][]],
: [coproj_dic[][][][][][][IRAM1_index][]],
: [coproj_dic[][][][][][][IRAM1_index][]],
: [coproj_dic[][][][][][][IRAM1_index][]],
},
: {
: [coproj_dic[][][][][][][IROM2_index][]],
: [coproj_dic[][][][][][][IROM2_index][]],
: [coproj_dic[][][][][][][IROM2_index][]],
: [coproj_dic[][][][][][][IROM2_index][]],
},
: {
: [coproj_dic[][][][][][][IRAM2_index][]],
: [coproj_dic[][][][][][][IRAM2_index][]],
: [coproj_dic[][][][][][][IRAM2_index][]],
: [coproj_dic[][][][][][][IRAM2_index][]],
}
}
}
}
return mcu | Parse project file to get mcu definition |
1,998 | def close (self, force=True):
if not self.closed:
self.flush()
os.close (self.child_fd)
time.sleep(self.delayafterclose)
if self.isalive():
if not self.terminate(force):
raise ExceptionPexpect ()
self.child_fd = -1
self.closed = True | This closes the connection with the child application. Note that
calling close() more than once is valid. This emulates standard Python
behavior with files. Set force to True if you want to make sure that
the child is terminated (SIGKILL is sent if the child ignores SIGHUP
and SIGINT). |
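A brief usage sketch, assuming this close() belongs to a pexpect-style spawn object; the child command is arbitrary.

import pexpect  # assumed host library for the close() above

child = pexpect.spawn('cat')       # start a throwaway child process
child.sendline('hello')
child.close(force=True)            # force termination if SIGHUP/SIGINT are ignored
print(child.exitstatus, child.signalstatus)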
1,999 | def warn_with_traceback(message, category, filename, lineno, file=None, line=None):
import traceback
traceback.print_stack()
    log = file if hasattr(file, 'write') else sys.stderr
settings.write(warnings.formatwarning(message, category, filename, lineno, line)) | Get full tracebacks when warning is raised by setting
warnings.showwarning = warn_with_traceback
See also
--------
http://stackoverflow.com/questions/22373927/get-traceback-of-warnings |
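A self-contained variant of the same hook; the snippet above writes through the host package's settings object, so this sketch writes straight to the chosen stream instead.

import sys
import traceback
import warnings

def warn_with_traceback_sketch(message, category, filename, lineno, file=None, line=None):
    # Print the stack so the warning's origin is visible, then emit the warning text.
    log = file if hasattr(file, 'write') else sys.stderr
    traceback.print_stack()
    log.write(warnings.formatwarning(message, category, filename, lineno, line))

warnings.showwarning = warn_with_traceback_sketch
warnings.warn('something looks off')   # now prints a full traceback as well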