Dataset columns:
  Unnamed: 0 (int64): row index, 0 to 389k
  code (string): 26 to 79.6k characters
  docstring (string): 1 to 46.9k characters
2,300
def get_live_data_flat_binary(self):
    byte_buffer = self.game.UpdateLiveDataPacketFlatbuffer()
    if byte_buffer.size >= 4:  # ignore empty buffers
        proto_string = ctypes.string_at(byte_buffer.ptr, byte_buffer.size)
        self.game.Free(byte_buffer.ptr)  # free the memory allocated by the core dll
        self.game_status(None, RLBotCoreStatus.Success)
        return proto_string
Gets the live data packet in flatbuffer binary format. You'll need to do something like GameTickPacket.GetRootAsGameTickPacket(binary, 0) to get the data out. This is a temporary method designed to keep the integration test working. It returns the raw bytes of the flatbuffer so that it can be stored in a file. We can get rid of this once we have a first-class data recorder that lives inside the core dll.
2,301
def getReverseRankMaps(self):
    reverseRankMaps = []
    for preference in self.preferences:
        reverseRankMaps.append(preference.getReverseRankMap())
    return reverseRankMaps
Returns a list of dictionaries, one for each preference, that associate each position in the ranking with a list of integer representations of the candidates ranked at that position.
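For illustration, a minimal sketch of the shape of one reverse rank map (hypothetical candidates assumed): a ballot ranking candidate 1 first and candidates 2 and 3 tied at second would look like this.

# Hypothetical reverse rank map for one preference:
# position 1 -> [1], position 2 -> [2, 3] (candidates 2 and 3 tied)
reverse_rank_map = {1: [1], 2: [2, 3]}
print(reverse_rank_map[2])  # [2, 3]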
2,302
def function(self, addr=None, name=None, create=False, syscall=False, plt=None):
    if addr is not None:
        try:
            f = self._function_map.get(addr)
            if plt is None or f.is_plt == plt:
                return f
        except KeyError:
            if create:
                # the function is not found; create it
                f = self._function_map[addr]
                if name is not None:
                    f.name = name
                if syscall:
                    f.is_syscall = True
                return f
    elif name is not None:
        for func in self._function_map.values():
            if func.name == name:
                if plt is None or func.is_plt == plt:
                    return func
    return None
Get a function object from the function manager. Pass either `addr` or `name` with the appropriate values. :param int addr: Address of the function. :param str name: Name of the function. :param bool create: Whether to create the function or not if the function does not exist. :param bool syscall: True to create the function as a syscall, False otherwise. :param bool or None plt: True to find the PLT stub, False to find a non-PLT stub, None to disable this restriction. :return: The Function instance, or None if the function is not found and create is False. :rtype: Function or None
2,303
def set_stop_chars_left(self, stop_chars):
    if not isinstance(stop_chars, set):
        raise TypeError("stop_chars should be type set "
                        "but {} was given".format(type(stop_chars)))
    self._stop_chars_left = stop_chars
    self._stop_chars = self._stop_chars_left | self._stop_chars_right
Set stop characters for text on left from TLD. Stop characters are used when determining end of URL. :param set stop_chars: set of characters :raises: TypeError
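A minimal usage sketch, assuming this is urlextract's URLExtract.set_stop_chars_left (the '|' character added here is an arbitrary example):

from urlextract import URLExtract

extractor = URLExtract()
stop_left = extractor.get_stop_chars_left().copy()
stop_left.add('|')                       # also cut the URL off at '|'
extractor.set_stop_chars_left(stop_left)
print(extractor.find_urls("see|https://example.com for details"))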
2,304
def color_is_forced(**envars):
    # The string literals here were elided in the extracted source;
    # '0' follows the clicolors convention (any non-'0' value forces color).
    result = env.CLICOLOR_FORCE and env.CLICOLOR_FORCE != '0'
    log.debug('forced: %s (CLICOLOR_FORCE=%s)', result, env.CLICOLOR_FORCE or '')
    for name, value in envars.items():
        envar = getattr(env, name)
        if envar.value == value:
            result = True
        log.debug('%s == %s -> %s', name, value, result)
    return result
Look for clues in environment, e.g.: - https://bixense.com/clicolors/ Arguments: envars: Additional environment variables to check for equality, i.e. ``MYAPP_COLOR_FORCED='1'`` Returns: Bool: Forced
2,305
def set_tag(self, tag):
    if self._world:
        if self._world.get_entity_by_tag(tag):
            raise NonUniqueTagError(tag)
    self._tag = tag
Sets the tag. If the Entity belongs to the world it will check for tag conflicts.
2,306
def is_letter(uni_char):
    category = Category.get(uni_char)
    return (category == Category.UPPERCASE_LETTER or
            category == Category.LOWERCASE_LETTER or
            category == Category.TITLECASE_LETTER or
            category == Category.MODIFIER_LETTER or
            category == Category.OTHER_LETTER)
Determine whether the given Unicode character is a Unicode letter
2,307
def parse_note(cls, note):
    if isinstance(note, tuple):
        if len(note) != 2:
            raise ValueError()
        return note
    try:
        match = cls.re_note.match(note)
    except TypeError:
        return note, None
    return match.groups()
Parse string annotation into object reference with optional name.
2,308
def update_template(self, template_id, template_dict):
    return self._create_put_request(
        resource=TEMPLATES,
        billomat_id=template_id,
        send_data=template_dict,
    )
Updates a template :param template_id: the template id :param template_dict: dict :return: dict
2,309
def _compile_seriesflow(self):
    string = ...  # the expression string assembled here was elided in the extracted source
    # filename and mode arguments were also elided; 'exec' mode is assumed
    self.seriesflow = compile(eval(string), '', 'exec')
Post power flow computation of series device flow
2,310
def put_text(self, key, text):
    with open(key, "w") as fh:
        fh.write(text)
Put the text into the storage associated with the key.
2,311
def lookup_casstype(casstype):
    if isinstance(casstype, (CassandraType, CassandraTypeType)):
        return casstype
    try:
        return parse_casstype_args(casstype)
    except (ValueError, AssertionError, IndexError) as e:
        raise ValueError("Don't know how to parse type string %r: %s"
                         % (casstype, e))
Given a Cassandra type as a string (possibly including parameters), hand back the CassandraType class responsible for it. If a name is not recognized, a custom _UnrecognizedType subclass will be created for it. Example: >>> lookup_casstype('org.apache.cassandra.db.marshal.MapType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.Int32Type)') <class 'cassandra.cqltypes.MapType(UTF8Type, Int32Type)'>
2,312
def error_values_summary(error_values, **summary_df_kwargs):
    df = pf.summary_df_from_multi(error_values, **summary_df_kwargs)
    # Index labels below were elided in the extracted source; they are
    # assumed to follow nestcheck's calculation-type naming.
    imp_std, imp_std_unc, imp_frac, imp_frac_unc = \
        nestcheck.error_analysis.implementation_std(
            df.loc[('values std', 'value')],
            df.loc[('values std', 'uncertainty')],
            df.loc[('bootstrap std mean', 'value')],
            df.loc[('bootstrap std mean', 'uncertainty')])
    df.loc[('implementation std', 'value'), df.columns] = imp_std
    df.loc[('implementation std', 'uncertainty'), df.columns] = imp_std_unc
    df.loc[('implementation std frac', 'value'), :] = imp_frac
    df.loc[('implementation std frac', 'uncertainty'), :] = imp_frac_unc
    # Implementation RMSEs, computed from the values RMSE instead of the std
    if 'values rmse' in set(df.index.get_level_values('calculation type')):
        imp_rmse, imp_rmse_unc, imp_frac, imp_frac_unc = \
            nestcheck.error_analysis.implementation_std(
                df.loc[('values rmse', 'value')],
                df.loc[('values rmse', 'uncertainty')],
                df.loc[('bootstrap std mean', 'value')],
                df.loc[('bootstrap std mean', 'uncertainty')])
        df.loc[('implementation rmse', 'value'), df.columns] = imp_rmse
        df.loc[('implementation rmse', 'uncertainty'), df.columns] = \
            imp_rmse_unc
        df.loc[('implementation rmse frac', 'value'), :] = imp_frac
        df.loc[('implementation rmse frac', 'uncertainty'), :] = imp_frac_unc
    # Keep only the calculation types of interest, in order
    calcs_to_keep = ['true values', 'values mean', 'values std',
                     'values rmse', 'bootstrap std mean',
                     'implementation std', 'implementation std frac',
                     'implementation rmse', 'implementation rmse frac',
                     'thread ks pvalue mean', 'bootstrap ks distance mean',
                     'bootstrap energy distance mean',
                     'bootstrap earth mover distance mean']
    df = pd.concat([df.xs(calc, level='calculation type', drop_level=False)
                    for calc in calcs_to_keep
                    if calc in df.index.get_level_values('calculation type')])
    return df
Get summary statistics about calculation errors, including estimated implementation errors. Parameters ---------- error_values: pandas DataFrame Of format output by run_list_error_values (look at it for more details). summary_df_kwargs: dict, optional See pandas_functions.summary_df docstring for more details. Returns ------- df: pandas DataFrame Table showing means and standard deviations of results and diagnostics for the different runs. Also contains estimated numerical uncertainties on results.
2,313
def formatPathExpressions(seriesList):
    pathExpressions = sorted(set([s.pathExpression for s in seriesList]))
    return ','.join(pathExpressions)
Returns a comma-separated list of unique path expressions.
2,314
def add(self, item):
    if not item.startswith(self.prefix):
        item = os.path.join(self.base, item)
    self.files.add(os.path.normpath(item))
Add a file to the manifest. :param item: The pathname to add. This can be relative to the base.
2,315
def get_socket(host, port, timeout=None):
    for res in getaddrinfo(host, port, 0, SOCK_STREAM):
        af, socktype, proto, canonname, sa = res
        sock = None
        try:
            sock = socket(af, socktype, proto)
            if timeout is not None:
                sock.settimeout(timeout)
            sock.connect(sa)
            return sock
        except error:
            if sock is not None:
                sock.close()
    # no address succeeded
    raise error
Return a socket. :param str host: the hostname to connect to :param int port: the port number to connect to :param timeout: if specified, set the socket timeout
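A short usage sketch (example.com is a placeholder host assumed reachable):

sock = get_socket("example.com", 80, timeout=5)
sock.sendall(b"HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n")
print(sock.recv(64))   # first bytes of the HTTP response
sock.close()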
2,316
def get_max_id(self, object_type, role):
    # Object classes follow the standard POSIX LDAP schema; the string
    # literals were elided in the extracted source.
    if object_type == 'user':
        objectclass = 'posixAccount'
        ldap_attr = 'uidNumber'
    elif object_type == 'group':
        objectclass = 'posixGroup'
        ldap_attr = 'gidNumber'
    else:
        raise ldap_tools.exceptions.InvalidResult('Unknown object type')

    minID, maxID = Client.__set_id_boundary(role)
    filter = [
        "(objectclass={})".format(objectclass),
        "({}>={})".format(ldap_attr, minID)
    ]
    if maxID is not None:
        filter.append("({}<={})".format(ldap_attr, maxID))

    id_list = self.search(filter, [ldap_attr])
    if id_list == []:
        id = minID
    else:
        if object_type == 'user':
            id = max([i.uidNumber.value for i in id_list]) + 1
        elif object_type == 'group':
            id = max([i.gidNumber.value for i in id_list]) + 1
        else:
            raise ldap_tools.exceptions.InvalidResult('Unknown object type')
    return id
Get the highest used ID.
2,317
def _replace_numeric_markers(operation, string_parameters):
    def replace_markers(marker, op, parameters):
        param_count = len(parameters)
        marker_index = 0
        start_offset = 0
        while True:
            found_offset = op.find(marker, start_offset)
            if not found_offset > -1:
                break
            if marker_index < param_count:
                op = op[:found_offset] + op[found_offset:].replace(
                    marker, parameters[marker_index], 1)
                start_offset = found_offset + len(parameters[marker_index])
                marker_index += 1
            else:
                raise ProgrammingError("Incorrect number of bindings "
                                       "supplied. The current statement uses "
                                       "%d or more, and there are %d "
                                       "supplied." % (marker_index + 1,
                                                      param_count))
        if marker_index != 0 and marker_index != param_count:
            raise ProgrammingError("Incorrect number of bindings "
                                   "supplied. The current statement uses "
                                   "%d or more, and there are %d supplied."
                                   % (marker_index + 1, param_count))
        return op

    # Marker literals were elided in the extracted source; '?' (qmark) and
    # '%s' (format) are assumed from the docstring.
    operation = replace_markers('?', operation, string_parameters)
    operation = replace_markers(r'%s', operation, string_parameters)
    # Numeric markers (:1, :2, ...) are substituted positionally; only a
    # fragment of this loop survived extraction, so it is reconstructed here.
    for index in range(len(string_parameters), 0, -1):
        operation = operation.replace(':%d' % index,
                                      string_parameters[index - 1])
    return operation
Replaces qmark, format, and numeric markers in the given operation, from the string_parameters list. Raises ProgrammingError on wrong number of parameters or bindings when using qmark. There is no error checking on numeric parameters.
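Assuming the qmark marker above, an illustrative call would look like this (parameters arrive already rendered as SQL literal strings):

op = "SELECT * FROM t WHERE a = ? AND b = ?"
print(_replace_numeric_markers(op, ["'x'", "1"]))
# SELECT * FROM t WHERE a = 'x' AND b = 1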
2,318
def infer_year(date):
    if isinstance(date, str):
        # The pattern and error text were elided in the extracted source;
        # a leading four-digit year is assumed from the docstring examples.
        pattern = r'(?P<year>\d{4})'
        result = re.match(pattern, date)
        if result:
            return int(result.groupdict()['year'])
        else:
            raise ValueError('Invalid date string provided: {}'.format(date))
    elif isinstance(date, np.datetime64):
        return date.item().year
    else:
        return date.year
Given a datetime-like object or string infer the year. Parameters ---------- date : datetime-like object or str Input date Returns ------- int Examples -------- >>> infer_year('2000') 2000 >>> infer_year('2000-01') 2000 >>> infer_year('2000-01-31') 2000 >>> infer_year(datetime.datetime(2000, 1, 1)) 2000 >>> infer_year(np.datetime64('2000-01-01')) 2000 >>> infer_year(DatetimeNoLeap(2000, 1, 1)) 2000 >>>
2,319
def combine(self, expert_out, multiply_by_gates=True):
    stitched = common_layers.convert_gradient_to_tensor(
        tf.concat(expert_out, 0))
    if multiply_by_gates:
        stitched *= tf.expand_dims(self._nonzero_gates, 1)
    combined = tf.unsorted_segment_sum(stitched, self._batch_index,
                                       tf.shape(self._gates)[0])
    return combined
Sum together the expert output, weighted by the gates. The slice corresponding to a particular batch element `b` is computed as the sum over all experts `i` of the expert output, weighted by the corresponding gate values. If `multiply_by_gates` is set to False, the gate values are ignored. Args: expert_out: a list of `num_experts` `Tensor`s, each with shape `[expert_batch_size_i, <extra_output_dims>]`. multiply_by_gates: a boolean Returns: a `Tensor` with shape `[batch_size, <extra_output_dims>]`.
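A NumPy sketch of the same gate-weighted segment sum (shapes, gate values, and expert outputs are hypothetical):

import numpy as np

expert_out = [np.array([[1.0, 2.0]]),      # expert 0's output row
              np.array([[10.0, 20.0]])]    # expert 1's output row
nonzero_gates = np.array([0.25, 0.75])     # gate values in dispatch order
batch_index = np.array([0, 0])             # both rows belong to batch element 0

stitched = np.concatenate(expert_out, axis=0) * nonzero_gates[:, None]
combined = np.zeros((1, 2))
np.add.at(combined, batch_index, stitched)  # segment sum over batch indices
print(combined)                             # [[ 7.75 15.5 ]]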
2,320
def cancel_broadcast(self, broadcast_guid):
    # Path, payload, and method strings were elided in the extracted
    # source; the values below are assumed.
    subpath = 'broadcasts/%s' % broadcast_guid
    broadcast = {'status': 'CANCELED'}
    bcast_dict = self._call(subpath, method='POST', data=broadcast,
                            content_type='application/json')
    return bcast_dict
Cancel a broadcast specified by guid
2,321
def get_image(roi_rec, short, max_size, mean, std):
    # roi_rec keys restored from the docstring:
    # ["image", "boxes", "gt_classes", "flipped"]
    im = imdecode(roi_rec['image'])
    if roi_rec["flipped"]:
        im = im[:, ::-1, :]
    im, im_scale = resize(im, short, max_size)
    height, width = im.shape[:2]
    im_info = np.array([height, width, im_scale], dtype=np.float32)
    im_tensor = transform(im, mean, std)

    # gt boxes: (x1, y1, x2, y2, cls)
    if roi_rec['gt_classes'].size > 0:
        gt_inds = np.where(roi_rec['gt_classes'] != 0)[0]
        gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
        gt_boxes[:, 0:4] = roi_rec['boxes'][gt_inds, :]
        gt_boxes[:, 4] = roi_rec['gt_classes'][gt_inds]
        gt_boxes[:, 0:4] *= im_scale
    else:
        gt_boxes = np.empty((0, 5), dtype=np.float32)
    return im_tensor, im_info, gt_boxes
read, resize, transform image, return im_tensor, im_info, gt_boxes roi_rec should have keys: ["image", "boxes", "gt_classes", "flipped"] 0 --- x (width, second dim of im) | y (height, first dim of im)
2,322
def get_inters(r, L, R_cut):
    if r.shape[1] == 2:
        _cell_list.cell_list_2d.make_inters(r.T, L, R_cut)
    elif r.shape[1] == 3:
        _cell_list.cell_list_3d.make_inters(r.T, L, R_cut)
    else:
        # message elided in the extracted source
        print('Dimension not supported by the cell list; '
              'falling back to direct computation.')
        return get_inters_direct(r, L, R_cut)
    return _parse_inters()
Return points within a given cut-off of each other, in a periodic system. Uses a cell-list. Parameters ---------- r: array, shape (n, d) where d is one of (2, 3). A set of n point coordinates. Coordinates are assumed to lie in [-L / 2, L / 2]. L: float. Bounds of the system. R_cut: float. The maximum distance within which to consider points to lie near each other. Returns ------- inters: array, shape (n, n) Indices of the nearby points. For each particle indexed by the first axis, the second axis lists each index of a nearby point. intersi: array, shape (n,) Total number of nearby points. This array should be used to index `inters`, as for point `i`, elements in `inters[i]` beyond `intersi[i]` have no well-defined value.
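For intuition, a brute-force NumPy reference for the same periodic neighbor test (a hypothetical helper, assuming the minimum-image convention; the cell list computes the same relation more efficiently):

import numpy as np

def brute_force_inters(r, L, R_cut):
    d = r[:, None, :] - r[None, :, :]
    d -= L * np.round(d / L)              # periodic wrapping
    near = np.linalg.norm(d, axis=-1) < R_cut
    np.fill_diagonal(near, False)         # a point is not its own neighbor
    return near

r = np.random.uniform(-0.5, 0.5, size=(5, 2))
print(brute_force_inters(r, 1.0, 0.3))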
2,323
def send_message(self, number, content):
    outgoing_message = TextMessageProtocolEntity(
        content.encode("utf-8") if sys.version_info >= (3, 0) else content,
        to=self.normalize_jid(number))
    self.toLower(outgoing_message)
    return outgoing_message
Send message :param str number: phone number with cc (country code) :param str content: body text of the message
2,324
def clearLocatorCache(self, login, tableName):
    self.send_clearLocatorCache(login, tableName)
    self.recv_clearLocatorCache()
Parameters: - login - tableName
2,325
def _check_token(self):
    need_token = (self._token_info is None or
                  self.auth_handler.is_token_expired(self._token_info))
    if need_token:
        # Token keys follow the standard OAuth2 response fields; the
        # literals were elided in the extracted source.
        new_token = self.auth_handler.refresh_access_token(
            self._token_info['refresh_token'])
        if new_token is None:
            return
        self._token_info = new_token
    self._auth_header = {"content-type": "application/json",
                         "Authorization": "Bearer {}".format(
                             self._token_info.get('access_token'))}
Simple Mercedes me API.
2,326
def setup_exchange(self, exchange_name):
    _logger.debug('Declaring exchange %s', exchange_name)
    self._channel.exchange_declare(self.on_exchange_declareok,
                                   exchange_name,
                                   self.EXCHANGE_TYPE,
                                   durable=True,
                                   passive=True)
Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC command. When it is complete, the on_exchange_declareok method will be invoked by pika. :param str|unicode exchange_name: The name of the exchange to declare
2,327
def chart_part(self):
    rId = self._element.chart_rId
    chart_part = self.part.related_parts[rId]
    return chart_part
The |ChartPart| object containing the chart in this graphic frame.
2,328
def _make_writeable(filename):
    import stat
    if sys.platform.startswith('java'):
        # On Jython there is no os.chmod()
        return
    if not os.access(filename, os.W_OK):
        st = os.stat(filename)
        new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR
        os.chmod(filename, new_permissions)
Make sure that the file is writeable. Useful if our source is read-only.
2,329
def _change_color(self, event):
    h = self.bar.get()
    self.square.set_hue(h)
    (r, g, b), (h, s, v), sel_color = self.square.get()
    self.red.set(r)
    self.green.set(g)
    self.blue.set(b)
    self.hue.set(h)
    self.saturation.set(s)
    self.value.set(v)
    self.hexa.delete(0, "end")
    self.hexa.insert(0, sel_color.upper())
    if self.alpha_channel:
        self.alphabar.set_color((r, g, b))
        self.hexa.insert('end', ("%2.2x" % self.alpha.get()).upper())
    self._update_preview()
Respond to motion of the hsv cursor.
2,330
def p_expression_logical(self, p):
    p[0] = Expression(left=p[1], operator=p[2], right=p[3])
expression : expression logical expression
2,331
def coarsen_line(line, level=2, exponential=False, draw=True):
    xdata = line.get_xdata()
    ydata = line.get_ydata()
    xdata, ydata = _fun.coarsen_data(xdata, ydata, level=level,
                                     exponential=exponential)
    if len(ydata) == 0:
        print("There's nothing left in " + str(line) + "!")
    else:
        line.set_data(xdata, ydata)
    if draw:
        _pylab.draw()
Coarsens the specified line (see spinmob.coarsen_data() for more information). Parameters ---------- line Matplotlib line instance. level=2 How strongly to coarsen. exponential=False If True, use the exponential method (great for log-x plots). draw=True Redraw when complete.
2,332
def get_cipher(self):
    # A zeroed IV, as described in the docstring (pgcrypto expects it)
    return self.cipher_class.new(self.cipher_key,
                                 self.cipher_class.MODE_CBC,
                                 b'\0' * self.cipher_class.block_size)
Return a new Cipher object for each time we want to encrypt/decrypt. This is because pgcrypto expects a zeroed block for IV (initial value), but the IV on the cipher object is cumulatively updated each time encrypt/decrypt is called.
2,333
def read_relative_file(filename, relative_to=None):
    if relative_to is None:
        relative_to = os.path.dirname(__file__)
    with open(os.path.join(os.path.dirname(relative_to), filename)) as f:
        return f.read()
Returns contents of the given file, whose path is assumed to be relative to this package.
2,334
def findNestedClassLike(self, lst):
    if self.kind == "class" or self.kind == "struct":
        lst.append(self)
    for c in self.children:
        c.findNestedClassLike(lst)
Recursive helper function for finding nested classes and structs. If this node is a class or struct, it is appended to ``lst``. Each node also calls each of its child ``findNestedClassLike`` with the same list. :Parameters: ``lst`` (list) The list each class or struct node is to be appended to.
2,335
def list_actions(name, location='\\'):
    with salt.utils.winapi.Com():
        task_service = win32com.client.Dispatch("Schedule.Service")
        task_service.Connect()
        task_folder = task_service.GetFolder(location)
        task_definition = task_folder.GetTask(name).Definition
        actions = task_definition.Actions
        ret = []
        for action in actions:
            ret.append(action.Id)
        return ret
r''' List all actions that pertain to a task in the specified location. :param str name: The name of the task for which list actions. :param str location: A string value representing the location of the task from which to list actions. Default is '\\' which is the root for the task scheduler (C:\Windows\System32\tasks). :return: Returns a list of actions. :rtype: list CLI Example: .. code-block:: bash salt 'minion-id' task.list_actions <task_name>
2,336
def frames(self, flush=True):
    self.flush()
    ret_val, frame = self._sensor.read()
    if not ret_val:
        raise Exception("Unable to retrieve frame from OpenCVCameraSensor "
                        "for id {0}".format(self._device_id))
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    if self._upside_down:
        frame = np.flipud(frame).astype(np.uint8)
        frame = np.fliplr(frame).astype(np.uint8)
    return ColorImage(frame)
Returns the latest color image from the stream Raises: Exception if opencv sensor gives ret_val of 0
2,337
def get_dependent_items(self, item) -> typing.List:
    with self.__dependency_tree_lock:
        return copy.copy(
            self.__dependency_tree_source_to_target_map.get(
                weakref.ref(item), list()))
Return the list of data items containing data that directly depends on data in this item.
2,338
def pad(num, n=2, sign=False):
    s = unicode(abs(num))
    if len(s) < n:
        s = '0' * (n - len(s)) + s
    if not sign:
        return s
    if num >= 0:
        return '+' + s
    else:
        return '-' + s
returns n digit string representation of the num
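Example calls (this is Python 2 code, where unicode() exists):

print(pad(5))               # '05'
print(pad(5, n=3))          # '005'
print(pad(5, sign=True))    # '+05'
print(pad(-5, sign=True))   # '-05'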
2,339
def _pct_escape_handler(err):
    chunk = err.object[err.start:err.end]
    replacements = _pct_encoded_replacements(chunk)
    return ("".join(replacements), err.end)
Encoding error handler that does percent-escaping of Unicode, to be used with codecs.register_error TODO: replace use of this with urllib.parse.quote as appropriate
2,340
def clean(self, timeout=60):
    self.refresh()
    # Setting keys restored from the update() call below
    tds = self['maxTotalDataSizeMB']
    ftp = self['frozenTimePeriodInSecs']
    was_disabled_initially = self.disabled
    try:
        if (not was_disabled_initially and
                self.service.splunk_version < (5,)):
            raise OperationError("Cleaning index %s took longer than %s "
                                 "seconds; timing out."
                                 % (self.name, timeout))
    finally:
        # restore original settings
        self.update(maxTotalDataSizeMB=tds, frozenTimePeriodInSecs=ftp)
        if (not was_disabled_initially and
                self.service.splunk_version < (5,)):
            self.enable()
    return self
Deletes the contents of the index. This method blocks until the index is empty, because it needs to restore values at the end of the operation. :param timeout: The time-out period for the operation, in seconds (the default is 60). :type timeout: ``integer`` :return: The :class:`Index`.
2,341
def set_href_prefix(self, prefix):
    self.href_prefix = prefix
    for property_ in self.properties.values():
        property_.set_href_prefix(prefix)
    for action_name in self.actions.keys():
        for action in self.actions[action_name]:
            action.set_href_prefix(prefix)
Set the prefix of any hrefs associated with this thing. prefix -- the prefix
2,342
def get_status_code_and_schema_rst(self, responses):
    for status_code, response_schema in responses.items():
        status_code = int(status_code)
        schema = response_schema.get('schema', None)
        status = HTTP_STATUS_CODES.get(status_code, None)
        if status is None or not (100 < status_code < 300):
            continue
        # The RST literal strings below were elided in the extracted
        # source; `...` marks the gaps. The 'description' key is assumed.
        self.write(..., 1)
        self.write()
        self.write(..., 1)
        self.write()
        self.write('{} {}'.format(status_code, status), 2)
        self.write('{}'.format(response_schema['description']), 2)
        self.write(..., 2)
        self.write()
        if schema:
            self.schema_handler(schema)
        else:
            self.write(..., self.indent_depth)
Function to prepare information about responses with examples; only responses with status codes from `101` to `299` are prepared. :param responses: -- dictionary that contains responses, with status code as key :type responses: dict :return:
2,343
def plot_groups_unplaced(self, fout_dir=".", **kws_usr):
    plotobj = PltGroupedGos(self)
    return plotobj.plot_groups_unplaced(fout_dir, **kws_usr)
Plot each GO group.
2,344
def sampling_volume_value(self):
    svi = self.pdx.SamplingVolume
    tli = self.pdx.TransmitLength
    return self._sampling_volume_value(svi, tli)
Returns the device sampling volume value in m.
2,345
def set_color_scheme(self, foreground_color, background_color):
    self.ansi_handler.set_color_scheme(foreground_color, background_color)
    background_color = QColor(background_color)
    foreground_color = QColor(foreground_color)
    self.set_palette(background=background_color,
                     foreground=foreground_color)
    self.set_pythonshell_font()
Set color scheme of the console (foreground and background).
2,346
def get_privacy_options(user):
    privacy_options = {}
    for ptype in user.permissions:
        for field in user.permissions[ptype]:
            if ptype == "self":
                privacy_options["{}-{}".format(field, ptype)] = \
                    user.permissions[ptype][field]
            else:
                privacy_options[field] = user.permissions[ptype][field]
    return privacy_options
Get a user's privacy options to pass as an initial value to a PrivacyOptionsForm.
2,347
def cumulative_detections(dates=None, template_names=None, detections=None,
                          plot_grouped=False, group_name=None, rate=False,
                          plot_legend=True, ax=None, **kwargs):
    import matplotlib.pyplot as plt
    from eqcorrscan.core.match_filter import Detection
    # Color, line-style, label, and format literals were elided in the
    # extracted source; the values used below are assumed.
    colors = cycle(['blue', 'green', 'red', 'cyan', 'magenta', 'yellow',
                    'black', 'firebrick', 'purple', 'darkgoldenrod', 'gray'])
    linestyles = cycle(['-', '-.', '--', ':'])
    if not detections:
        if type(dates[0]) != list:
            dates = [dates]
    else:
        dates = []
        template_names = []
        for detection in detections:
            if not type(detection) == Detection:
                raise IOError('detections must be a list of '
                              'eqcorrscan.core.match_filter.Detection')
            dates.append(detection.detect_time.datetime)
            template_names.append(detection.template_name)
        _dates = []
        _template_names = []
        for template_name in list(set(template_names)):
            _template_names.append(template_name)
            _dates.append([date for i, date in enumerate(dates)
                           if template_names[i] == template_name])
        dates = _dates
        template_names = _template_names
    if plot_grouped:
        _dates = []
        for template_dates in dates:
            _dates += template_dates
        dates = [_dates]
        if group_name:
            template_names = group_name
        else:
            template_names = ['All templates']
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
    else:
        fig = ax.figure
    ax.margins(0, 0)
    min_date = min([min(_d) for _d in dates])
    max_date = max([max(_d) for _d in dates])
    for k, template_dates in enumerate(dates):
        template_dates.sort()
        plot_dates = deepcopy(template_dates)
        plot_dates.insert(0, min_date)
        plot_dates.insert(-1, template_dates[-1])
        color = next(colors)
        if color == 'blue':  # cycle the line style once per color cycle
            linestyle = next(linestyles)
        counts = np.arange(-1, len(template_dates) + 1)
        if rate:
            if not plot_grouped:
                msg = 'Plotting rate only implemented for plot_grouped=True'
                raise NotImplementedError(msg)
            if 31 < (max_date - min_date).days < 365:
                bins = (max_date - min_date).days
                ax.set_ylabel('Detections per day')
            elif (max_date - min_date).days <= 31:
                bins = (max_date - min_date).days * 4
                ax.set_ylabel('Detections per six hours')
            else:
                bins = (max_date - min_date).days // 7
                ax.set_ylabel('Detections per week')
            if len(plot_dates) <= 10:
                bins = 1
            ax.hist(mdates.date2num(plot_dates), bins=bins,
                    label='Detection rate', color='darkgrey', alpha=0.5)
        else:
            ax.plot(plot_dates, counts, linestyle,
                    color=color, label=template_names[k],
                    linewidth=2.0, drawstyle='steps')
    ax.set_ylabel('Cumulative detections')
    ax.set_xlabel('Date')
    # Set formatters for x-labels
    mins = mdates.MinuteLocator()
    max_date = dates[0][0]
    min_date = max_date
    for date_list in dates:
        if max(date_list) > max_date:
            max_date = max(date_list)
        if min(date_list) < min_date:
            min_date = min(date_list)
    timedif = max_date - min_date
    if 10800 <= timedif.total_seconds() <= 25200:
        hours = mdates.MinuteLocator(byminute=[0, 30])
        mins = mdates.MinuteLocator(byminute=np.arange(0, 60, 10))
    elif 7200 <= timedif.total_seconds() < 10800:
        hours = mdates.MinuteLocator(byminute=[0, 15, 30, 45])
        mins = mdates.MinuteLocator(byminute=np.arange(0, 60, 5))
    elif timedif.total_seconds() <= 1200:
        hours = mdates.MinuteLocator(byminute=np.arange(0, 60, 2))
        mins = mdates.MinuteLocator(byminute=np.arange(0, 60, 0.5))
    elif 25200 < timedif.total_seconds() <= 86400:
        hours = mdates.HourLocator(byhour=np.arange(0, 24, 3))
        mins = mdates.HourLocator(byhour=np.arange(0, 24, 1))
    elif 86400 < timedif.total_seconds() <= 172800:
        hours = mdates.HourLocator(byhour=np.arange(0, 24, 6))
        mins = mdates.HourLocator(byhour=np.arange(0, 24, 1))
    elif timedif.total_seconds() > 172800:
        hours = mdates.AutoDateLocator()
        mins = mdates.HourLocator(byhour=np.arange(0, 24, 3))
    else:
        hours = mdates.MinuteLocator(byminute=np.arange(0, 60, 5))
    if timedif.total_seconds() < 172800:
        ax.xaxis.set_minor_locator(mins)
        hrFMT = mdates.DateFormatter('%Y/%m/%d %H:%M:%S')
    else:
        hrFMT = mdates.DateFormatter('%Y/%m/%d')
    ax.xaxis.set_major_locator(hours)
    ax.xaxis.set_major_formatter(hrFMT)
    fig.autofmt_xdate()
    locs, labels = plt.xticks()
    plt.setp(labels, rotation=15)
    if not rate:
        ax.set_ylim([0, max([len(_d) for _d in dates])])
    if plot_legend:
        if ax.legend() is not None:
            leg = ax.legend(loc=2, prop={'size': 8}, ncol=2)
            leg.get_frame().set_alpha(0.5)
    fig = _finalise_figure(fig=fig, **kwargs)
    return fig
Plot cumulative detections or detection rate in time. Simple plotting function to take a list of either datetime objects or :class:`eqcorrscan.core.match_filter.Detection` objects and plot a cumulative detections list. Can take dates as a list of lists and will plot each list separately, e.g. if you have dates from more than one template it will overlay them in different colours. :type dates: list :param dates: Must be a list of lists of datetime.datetime objects :type template_names: list :param template_names: List of the template names in order of the dates :type detections: list :param detections: List of :class:`eqcorrscan.core.match_filter.Detection` :type plot_grouped: bool :param plot_grouped: Plot detections for each template individually, or group them all together - set to False (plot template detections individually) by default. :type rate: bool :param rate: Whether or not to plot the rate of detection per day. Only works for plot_grouped=True :type plot_legend: bool :param plot_legend: Specify whether to plot legend of template names. Defaults to True. :returns: :class:`matplotlib.figure.Figure` .. note:: Can either take lists of :class:`eqcorrscan.core.match_filter.Detection` objects directly, or two lists of dates and template names - either/or, not both. .. rubric:: Example >>> import datetime as dt >>> import numpy as np >>> from eqcorrscan.utils.plotting import cumulative_detections >>> dates = [] >>> for i in range(3): ... dates.append([dt.datetime(2012, 3, 26) + dt.timedelta(n) ... for n in np.random.randn(100)]) >>> cumulative_detections(dates, ['a', 'b', 'c'], ... show=True) # doctest: +SKIP .. plot:: import datetime as dt import numpy as np from eqcorrscan.utils.plotting import cumulative_detections dates = [] for i in range(3): dates.append([dt.datetime(2012, 3, 26) + dt.timedelta(n) for n in np.random.randn(100)]) cumulative_detections(dates, ['a', 'b', 'c'], show=True) .. rubric:: Example 2: Rate plotting >>> import datetime as dt >>> import numpy as np >>> from eqcorrscan.utils.plotting import cumulative_detections >>> dates = [] >>> for i in range(3): ... dates.append([dt.datetime(2012, 3, 26) + dt.timedelta(n) ... for n in np.random.randn(100)]) >>> cumulative_detections(dates, ['a', 'b', 'c'], plot_grouped=True, ... rate=True, show=True) # doctest: +SKIP .. plot:: import datetime as dt import numpy as np from eqcorrscan.utils.plotting import cumulative_detections dates = [] for i in range(3): dates.append([dt.datetime(2012, 3, 26) + dt.timedelta(n) for n in np.random.randn(100)]) cumulative_detections(dates, ['a', 'b', 'c'], plot_grouped=True, rate=True, show=True)
2,348
def _set_factory_context(factory_class, bundle_context):
    try:
        context = getattr(factory_class, constants.IPOPO_FACTORY_CONTEXT)
    except AttributeError:
        # Not a manipulated class
        return None
    if not context.completed:
        return None
    context.set_bundle_context(bundle_context)
    return context
Transforms the context data dictionary into its FactoryContext object form. :param factory_class: A manipulated class :param bundle_context: The class bundle context :return: The factory context, None on error
2,349
def remove_task_db(self, fid, force=False):
    self.remove_slice_db(fid)
    # The SQL text was elided in the extracted source; the table name
    # below is assumed.
    sql = 'DELETE FROM tasks WHERE fid=?'
    self.cursor.execute(sql, [fid, ])
    self.check_commit(force=force)
Remove the task from the database.
2,350
def parse_datetime(value):
    if not value:
        return None
    elif isinstance(value, datetime.datetime):
        return value
    return dateutil.parser.parse(value)
Attempts to parse `value` into an instance of ``datetime.datetime``. If `value` is ``None``, this function will return ``None``. Args: value: A timestamp. This can be a string or datetime.datetime value.
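Example calls (values are illustrative):

import datetime

print(parse_datetime("2021-03-01T12:30:00"))  # datetime.datetime(2021, 3, 1, 12, 30)
print(parse_datetime(None))                   # None
print(parse_datetime(datetime.datetime(2020, 1, 1)))  # passed through unchanged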
2,351
def total_border_pixels_from_mask_and_edge_pixels(mask, edge_pixels,
                                                  masked_grid_index_to_pixel):
    border_pixel_total = 0
    for i in range(edge_pixels.shape[0]):
        if check_if_border_pixel(mask, edge_pixels[i],
                                 masked_grid_index_to_pixel):
            border_pixel_total += 1
    return border_pixel_total
Compute the total number of border pixels in a mask.
2,352
def get_view(self):
    view = self.view
    if not view.is_initialized:
        view.initialize()
    if not view.proxy_is_active:
        view.activate_proxy()
    return view.proxy.widget
Get the root view to display. Make sure it is properly initialized.
2,353
def reset(self):
    self.indent_spaces = 0
    self._buffer[:] = []
    self.source = ''
    self.code = None
    self._is_complete = False
    self._full_dedent = False
Reset the input buffer and associated state.
2,354
def geometry(AA):
    # Single-letter amino-acid codes; the literals were elided in the
    # extracted source and are restored from the standard code table.
    geo_classes = {'G': GlyGeo, 'A': AlaGeo, 'S': SerGeo, 'C': CysGeo,
                   'V': ValGeo, 'I': IleGeo, 'L': LeuGeo, 'T': ThrGeo,
                   'R': ArgGeo, 'K': LysGeo, 'D': AspGeo, 'E': GluGeo,
                   'N': AsnGeo, 'Q': GlnGeo, 'M': MetGeo, 'H': HisGeo,
                   'P': ProGeo, 'F': PheGeo, 'Y': TyrGeo, 'W': TrpGeo}
    # Invalid codes fall back to glycine, as described in the docstring
    return geo_classes.get(AA, GlyGeo)()
Generates the geometry of the requested amino acid. The amino acid needs to be specified by its single-letter code. If an invalid code is specified, the function returns the geometry of Glycine.
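Example calls:

ala = geometry('A')   # alanine geometry
gly = geometry('X')   # unrecognized codes fall back to glycine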
2,355
def post_events(self, events):
    url = "{0}/{1}/projects/{2}/events".format(self.base_url,
                                               self.api_version,
                                               self.project_id)
    headers = utilities.headers(self.write_key)
    payload = json.dumps(events)
    response = self.fulfill(HTTPMethods.POST, url, data=payload,
                            headers=headers, timeout=self.post_timeout)
    self._error_handling(response)
    return self._get_response_json(response)
Posts events to the Keen IO API. The write key must be set first. :param events: the events to upload
2,356
def parse_file(src):
    if config.dest_dir == None:
        dest = src.dir
    else:
        dest = config.dest_dir
    output = get_output(src)
    # The path-separator and extension literals were elided in the
    # extracted source; a .js suffix is assumed.
    output_file = dest + '/' + src.basename + '.js'
    f = open(output_file, 'w')
    f.write(jsmin.jsmin(output))
    f.close()
    print "Wrote combined and minified file to: %s" % (output_file)
find file in config and output to dest dir
2,357
def get_logger(name, verbosity, stream):
    logger = logging.getLogger(name)
    logger.setLevel(
        {0: DEFAULT_LOGGING_LEVEL,
         1: logging.INFO,
         2: logging.DEBUG}.get(min(2, verbosity), DEFAULT_LOGGING_LEVEL)
    )
    logger.handlers = []
    handler = logging.StreamHandler(stream)
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(logging.Formatter(LOG_FORMAT))
    logger.addHandler(handler)
    return logger
Returns simple console logger.
2,358
def select_action(self, pos1, pos2, ctrl, shift):
    assert pos1.surf.surf_type == pos2.surf.surf_type
    assert pos1.surf.world_to_obs == pos2.surf.world_to_obs
    action = sc_pb.Action()
    action_spatial = pos1.action_spatial(action)
    if pos1.world_pos == pos2.world_pos:  # select a point
        select = action_spatial.unit_selection_point
        pos1.obs_pos.assign_to(select.selection_screen_coord)
        mod = sc_spatial.ActionSpatialUnitSelectionPoint
        if ctrl:
            select.type = mod.AddAllType if shift else mod.AllType
        else:
            select.type = mod.Toggle if shift else mod.Select
    else:  # select a rectangle
        select = action_spatial.unit_selection_rect
        rect = select.selection_screen_coord.add()
        pos1.obs_pos.assign_to(rect.p0)
        pos2.obs_pos.assign_to(rect.p1)
        select.selection_add = shift
    units = self._units_in_area(point.Rect(pos1.world_pos, pos2.world_pos))
    if units:
        self.clear_queued_action()
    return action
Return a `sc_pb.Action` with the selection filled.
2,359
def extract_rzip(archive, compression, cmd, verbosity, interactive, outdir):
    # Flag literals were elided in the extracted source; rzip's
    # decompress and keep flags are assumed.
    cmdlist = [cmd, '-d', '-k']
    if verbosity > 1:
        cmdlist.append('-v')
    outfile = util.get_single_outfile(outdir, archive)
    cmdlist.extend(["-o", outfile, archive])
    return cmdlist
Extract an RZIP archive.
2,360
def combine_assignments(self, assignments):
    group_by_fn = collections.defaultdict(list)
    for a in assignments:
        if not isinstance(a, Assign):
            raise ValueError("ops should be instances of mtf.Assign")
        group_by_fn[a.assign_fn].append(a)
    assignments_set = set(assignments)
    self._operations = [op for op in self._operations
                        if op not in assignments_set]
    ret = []
    for fn, ops in six.iteritems(group_by_fn):
        variables = []
        values = []
        for a in ops:
            variables.extend(a.variables)
            values.extend(a.inputs)
        ret.append(Assign(variables, values, fn))
    return ret
Rewrite the current graph to combine "Assign" operations. Combine similar Assign operations into grouped Assign operations. This is useful when using the rewrite_stack_variables() optimization, since variables can only be stacked if they are present in the same set of Assign operations. This function takes a list of Assign operations and returns a possibly shorter list of Assign operations. The input Assignment operations are removed from the graph and become invalid. Args: assignments: a list of Assign objects Returns: a list of Assign objects
2,361
def which(name):
    # The PATH lookup was elided in the extracted source.
    path_val = os.environ.get('PATH', '')
    for path in path_val.split(os.pathsep):
        filename = os.path.join(path, name)
        if os.access(filename, os.X_OK):
            return filename
    return None
Returns the full path to executable in path matching provided name. `name` String value. Returns string or ``None``.
2,362
def collectInterest(self):
    if self.collectedInterest:
        return False
    pg = self.usr.getPage("http://www.neopets.com/bank.phtml")
    form = pg.form(action="process_bank.phtml")
    form['type'] = "interest"
    pg = form.submit()
    # The success check was garbled in the extracted source; the page
    # text tested here and the logging call are assumed reconstructions.
    if "It's great to see you again" in pg.content:
        return True
    logging.getLogger("neolib.user").exception(
        "Failed to collect interest.", {'pg': pg})
    return False
Collects user's daily interest, returns result Returns bool - True if successful, False otherwise
2,363
def y(self, y):
    if y is None:
        return None
    return (self.height * (y - self.box.ymin) / self.box.height)
Project reversed y
2,364
def _copy_circuit_metadata(source_dag, coupling_map):
    target_dag = DAGCircuit()
    target_dag.name = source_dag.name
    for creg in source_dag.cregs.values():
        target_dag.add_creg(creg)
    device_qreg = QuantumRegister(len(coupling_map.physical_qubits), 'q')
    target_dag.add_qreg(device_qreg)
    return target_dag
Return a copy of source_dag with metadata but empty. Generate only a single qreg in the output DAG, matching the size of the coupling_map.
2,365
def extract_output(self, output_variables_list):
    variables_mapping = self.session_context.session_variables_mapping
    output = {}
    for variable in output_variables_list:
        if variable not in variables_mapping:
            logger.log_warning(
                "variable '{}' can not be found in variables mapping, "
                "failed to output!".format(variable)
            )
            continue
        output[variable] = variables_mapping[variable]
    utils.print_info(output)
    return output
extract output variables
2,366
def _rdistributive(self, expr, op_example):
    if expr.isliteral:
        return expr
    expr_class = expr.__class__
    args = (self._rdistributive(arg, op_example) for arg in expr.args)
    args = tuple(arg.simplify() for arg in args)
    if len(args) == 1:
        return args[0]
    expr = expr_class(*args)
    dualoperation = op_example.dual
    if isinstance(expr, dualoperation):
        expr = expr.distributive()
    return expr
Recursively flatten the `expr` expression for the `op_example` AND or OR operation instance example.
2,367
def _do_validate_sources_present(self, target):
    if not self.validate_sources_present:
        return True
    sources = target.sources_relative_to_buildroot()
    if not sources:
        # The message literal was elided in the extracted source.
        message = ('Target {} has no sources.'.format(target.address.spec))
        if not self.get_options().allow_empty:
            raise TaskError(message)
        else:
            logging.warn(message)
            return False
    return True
Checks whether sources is empty, and either raises a TaskError or just returns False. The specifics of this behavior are defined by whether the user sets --allow-empty to True/False: --allow-empty=False will result in a TaskError being raised in the event of an empty source set. If --allow-empty=True, this method will just return false and log a warning. Shared for all SimpleCodegenTask subclasses to help keep errors consistent and descriptive. :param target: Target to validate. :return: True if sources is not empty, False otherwise.
2,368
def atmos_worker(srcs, window, ij, args):
    src = srcs[0]
    rgb = src.read(window=window)
    rgb = to_math_type(rgb)
    atmos = simple_atmo(rgb, args["atmo"], args["contrast"], args["bias"])
    return scale_dtype(atmos, args["out_dtype"])
A simple atmospheric correction user function.
2,369
def extract_links(self, selector='a', *args, **kwargs):
    # The default selector literal was elided in the extracted source;
    # anchor tags are assumed from the docstring.
    try:
        links = self.get_tree_tag(selector=selector)
        for link in links:
            next_url = urljoin(self.url, link.get('href'))
            yield type(self)(next_url)
    except XPathError:
        raise Exception("Invalid %s selector - %s"
                        % (self.__selector_type__, selector))
    except Exception:
        raise Exception("Invalid %s selector - %s"
                        % (self.__selector_type__, selector))
Method for performing the link extraction for the crawler. \ The selector passed as the argument is a selector to point to the anchor tags \ that the crawler should pass through. A list of links is obtained, and the links \ are iterated through. The relative paths are converted into absolute paths and \ a ``XpathSelector``/``CssSelector`` object (as is the case) is created with the URL of the next page as the argument \ and this created object is yielded. The extract_links method basically generates ``XpathSelector``/``CssSelector`` objects for all of \ the links to be crawled through. :param selector: The selector for the anchor tags to be crawled through :return: A ``XpathSelector``/``CssSelector`` object for every page to be crawled through
2,370
def adupdates_simple(x, g, L, stepsize, inner_stepsizes, niter,
                     random=False):
    length = len(g)
    ranges = [Li.range for Li in L]
    duals = [space.zero() for space in ranges]
    for _ in range(niter):
        for i in range(length):
            x -= (1.0 / stepsize) * L[i].adjoint(duals[i])
        rng = (np.random.permutation(range(length)) if random
               else range(length))
        for j in rng:
            dual_tmp = ranges[j].element()
            dual_tmp = (
                g[j].convex_conj.proximal(
                    stepsize * inner_stepsizes[j]
                    if np.isscalar(inner_stepsizes[j])
                    else stepsize * np.asarray(inner_stepsizes[j]))(
                    duals[j] + stepsize * inner_stepsizes[j] * L[j](x)
                    if np.isscalar(inner_stepsizes[j])
                    else duals[j] +
                    stepsize * np.asarray(inner_stepsizes[j]) * L[j](x)))
            x -= 1.0 / stepsize * L[j].adjoint(dual_tmp - duals[j])
            duals[j].assign(dual_tmp)
Non-optimized version of ``adupdates``. This function is intended for debugging. It makes a lot of copies and performs no error checking.
2,371
def get_signalcheck(self, sar, **params):
    params = sar
    # The endpoint and HTTP-method literals were elided in the extracted
    # source; the values below are assumed from the Signal API docs.
    endpoint = 'rest/signal/check'
    retValue = self._API__request(endpoint, 'POST',
                                  params=params, convJSON=True)
    return retValue
get_signalcheck - perform a signal check. Parameters ---------- sar : dict signal-api-request specified as a dictionary of parameters. All of these parameters are optional. For details check https://api.postcode.nl/documentation/signal-api-example. returns : a response dictionary
2,372
def remove_bond(self, particle_pair):
    from mbuild.port import Port
    if self.root.bond_graph is None or not self.root.bond_graph.has_edge(
            *particle_pair):
        warn("Bond between {} and {} doesn't exist!".format(*particle_pair))
        return
    self.root.bond_graph.remove_edge(*particle_pair)
    # The remainder was garbled in the extracted source ("port[$] ...");
    # the reconstruction below assumes a dangling Port is added to each
    # particle, oriented along the removed bond.
    bond_vector = particle_pair[0].pos - particle_pair[1].pos
    distance = np.linalg.norm(bond_vector)
    particle_pair[0].parent.add(
        Port(anchor=particle_pair[0], orientation=-bond_vector,
             separation=distance / 2), 'port[$]')
    particle_pair[1].parent.add(
        Port(anchor=particle_pair[1], orientation=bond_vector,
             separation=distance / 2), 'port[$]')
Deletes a bond between a pair of Particles Parameters ---------- particle_pair : indexable object, length=2, dtype=mb.Compound The pair of Particles to remove the bond between
2,373
def new_chain(table='filter', chain=None, table_type=None, hook=None,
              priority=None, family='ipv4'):
    # Message and key literals were elided in the extracted source and
    # are reconstructed; defaults follow the CLI examples in the docstring.
    ret = {'comment': '', 'result': False}
    if not chain:
        ret['comment'] = 'Chain needs to be specified'
        return ret
    res = check_table(table, family=family)
    if not res['result']:
        return res
    res = check_chain(table, chain, family=family)
    if res['result']:
        ret['comment'] = 'chain {0} in table {1} in family {2} already exists'.\
            format(chain, table, family)
        return ret
    nft_family = _NFTABLES_FAMILIES[family]
    cmd = '{0} add chain {1} {2} {3}'.\
        format(_nftables_cmd(), nft_family, table, chain)
    if table_type or hook or priority:
        if table_type and hook and six.text_type(priority):
            cmd = r'{0} {{ type {1} hook {2} priority {3}\; }}'.\
                format(cmd, table_type, hook, priority)
        else:
            ret['comment'] = 'Table_type, hook, and priority required.'
            return ret
    out = __salt__['cmd.run'](cmd, python_shell=False)
    if not out:
        ret['comment'] = 'Chain {0} in table {1} in family {2} created'.\
            format(chain, table, family)
        ret['result'] = True
    else:
        ret['comment'] = 'Chain {0} in table {1} in family {2} could not be created'.\
            format(chain, table, family)
    return ret
.. versionadded:: 2014.7.0 Create new chain to the specified table. CLI Example: .. code-block:: bash salt '*' nftables.new_chain filter input salt '*' nftables.new_chain filter input \\ table_type=filter hook=input priority=0 salt '*' nftables.new_chain filter foo IPv6: salt '*' nftables.new_chain filter input family=ipv6 salt '*' nftables.new_chain filter input \\ table_type=filter hook=input priority=0 family=ipv6 salt '*' nftables.new_chain filter foo family=ipv6
2,374
def normalize(self):
    median_diff = np.median(np.diff(self.x))
    bin_edges = [self.x[0] - median_diff/2.0]
    bin_edges.extend(median_diff/2.0 + self.x)
    self.y_raw = self.y_raw/(self.y_raw.sum()*np.diff(bin_edges))
    self.smooth()
Normalizes the given data such that the area under the histogram/curve comes to 1. Also re-applies smoothing once done.
2,375
def import_model(cls, ins_name):
    try:
        package_space = getattr(cls, 'package_space')
    except AttributeError:
        raise ValueError('invalid package_space')
    else:
        return import_object(ins_name, package_space)
Import model class in models package
2,376
def parse(self, what):
    if '/' not in what:
        key, spec = what, ''
    else:
        key, spec = what.split('/')
    # The spec-prefix literals were elided in the extracted source;
    # 'ref-' and 'sid-' are assumed from the docstring and the
    # spec[4:] offsets.
    if spec and not spec.startswith(('ref-', 'sid-')):
        raise ValueError('Wrong specification in %s' % what)
    elif spec == '':  # all assets
        aids = []
        arefs = []
        for aid, rec in enumerate(self.assetcol.array):
            aids.append(aid)
            arefs.append(self.asset_refs[aid])
    elif spec.startswith('sid-'):  # passed the site ID
        sid = int(spec[4:])
        aids = []
        arefs = []
        for aid, rec in enumerate(self.assetcol.array):
            if rec['site_id'] == sid:
                aids.append(aid)
                arefs.append(self.asset_refs[aid])
    elif spec.startswith('ref-'):  # passed the asset reference
        arefs = [spec[4:]]
        aids = [self.str2asset[arefs[0]]['ordinal']]
    else:
        raise ValueError('Invalid asset specification %s' % what)
    return aids, arefs, spec, key
:param what: can be 'rlz-1/ref-asset1', 'rlz-2/sid-1', ...
2,377
def add_to_inventory(self):
    if self.lb_attrs:
        self.lb_attrs = self.consul.lb_details(
            self.lb_attrs[A.loadbalancer.ID]
        )
    # The attribute keys were elided in the extracted source; the load
    # balancer's first virtual IP address is assumed.
    host = self.lb_attrs['virtualIps'][0]['address']
    self.stack.add_lb_secgroup(self.name, [host], self.backend_port)
    self.stack.add_host(host, [self.name], self.lb_attrs)
Adds lb IPs to stack inventory
2,378
def has_builder(self):
    try:
        b = self.builder
    except AttributeError:
        b = self.builder = None
    return b is not None
Return whether this Node has a builder or not. In Boolean tests, this turns out to be a *lot* more efficient than simply examining the builder attribute directly ("if node.builder: ..."). When the builder attribute is examined directly, it ends up calling __getattr__ for both the __len__ and __nonzero__ attributes on instances of our Builder Proxy class(es), generating a bazillion extra calls and slowing things down immensely.
2,379
def sdot(U, V):
    nu = U.ndim
    return np.tensordot(U, V, axes=(nu-1, 0))
Computes the tensor product reducing the last dimension of U with the first dimension of V. For matrices, it is equal to the regular matrix product.
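A quick NumPy check of both cases (random shapes chosen for illustration):

import numpy as np

U = np.random.rand(2, 3)
V = np.random.rand(3, 4)
print(np.allclose(sdot(U, V), U @ V))  # True: matrix case reduces to matmul

T = np.random.rand(2, 5, 3)            # higher-rank case: contract last axis of T
print(sdot(T, V).shape)                # (2, 5, 4)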
2,380
def add_fileobj(self, fileobj, path, compress, flags=None):
    f = file_iter(fileobj)
    # os.stat() returns a stat_result; the permission bits live in st_mode
    flags = flags or os.stat(path).st_mode & 0o777
    return self.add_stream(f, path, compress, flags)
Add the contents of a file object to the MAR file. Args: fileobj (file-like object): open file object path (str): name of this file in the MAR file compress (str): One of 'xz', 'bz2', or None. Defaults to None. flags (int): permission of this file in the MAR file. Defaults to the permissions of `path`
2,381
def package_data(pkg, root_list):
    data = []
    for root in root_list:
        for dirname, _, files in os.walk(os.path.join(pkg, root)):
            for fname in files:
                data.append(os.path.relpath(os.path.join(dirname, fname),
                                            pkg))
    return {pkg: data}
Generic function to find package_data for `pkg` under `root`.
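A usage sketch in a setup.py (package name and directories are hypothetical):

from setuptools import setup

setup(
    name="mypkg",
    packages=["mypkg"],
    package_data=package_data("mypkg", ["templates", "static"]),
)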
2,382
def init_fakemod_dict(fm, adict=None):
    dct = {}
    # It seems pydoc (and perhaps others) needs any module instance to
    # implement a __nonzero__ method
    dct.setdefault('__nonzero__', lambda: True)
    dct.setdefault('__file__', __file__)
    if adict is not None:
        dct.update(adict)
    fm.__dict__.clear()
    fm.__dict__.update(dct)
Initialize a FakeModule instance __dict__. Kept as a standalone function and not a method so the FakeModule API can remain basically empty. This should be considered for private IPython use, used in managing namespaces for %run. Parameters ---------- fm : FakeModule instance adict : dict, optional
2,383
def ensure_index(self, index, mappings=None, settings=None, clear=False):
    mappings = mappings or []
    if isinstance(mappings, dict):
        mappings = [mappings]
    exists = self.indices.exists_index(index)
    if exists and not mappings and not clear:
        return
    if exists and clear:
        self.indices.delete_index(index)
        exists = False
    if exists:
        if not mappings:
            self.indices.delete_index(index)
            self.indices.refresh()
            self.indices.create_index(index, settings)
            return
        if clear:
            for maps in mappings:
                for key in list(maps.keys()):
                    self.indices.delete_mapping(index, doc_type=key)
            self.indices.refresh()
        if isinstance(mappings, SettingsBuilder):
            for name, data in list(mappings.mappings.items()):
                self.indices.put_mapping(doc_type=name, mapping=data,
                                         indices=index)
        else:
            from pyes.mappings import DocumentObjectField, ObjectField
            for maps in mappings:
                if isinstance(maps, tuple):
                    name, mapping = maps
                    self.indices.put_mapping(doc_type=name, mapping=mapping,
                                             indices=index)
                elif isinstance(maps, dict):
                    for name, data in list(maps.items()):
                        self.indices.put_mapping(doc_type=name, mapping=maps,
                                                 indices=index)
                elif isinstance(maps, (DocumentObjectField, ObjectField)):
                    self.put_mapping(doc_type=maps.name,
                                     mapping=maps.as_dict(),
                                     indices=index)
        return
    if settings:
        if isinstance(settings, dict):
            settings = SettingsBuilder(settings, mappings)
    else:
        if isinstance(mappings, SettingsBuilder):
            settings = mappings
        else:
            settings = SettingsBuilder(mappings=mappings)
    if not exists:
        self.indices.create_index(index, settings)
        self.indices.refresh(index, timesleep=1)
Ensure that an index with the given mappings exists.
2,384
def add_metrics(self, metrics: Iterable[float]) -> None:
    for metric in metrics:
        self.add_metric(metric)
Helper to add multiple metrics at once.
2,385
def is_deb_package_installed(pkg):
    # The hidden output groups and the shell command were elided in the
    # extracted source; a dpkg status query is assumed.
    with settings(hide('warnings', 'running', 'stdout', 'stderr'),
                  warn_only=True, capture=True):
        result = sudo('dpkg -s %s' % pkg)
        return not bool(result.return_code)
checks if a particular deb package is installed
2,386
def ellipsis(text, length, symbol="..."):
    if len(text) > length:
        pos = text.rfind(" ", 0, length)
        if pos < 0:
            return text[:length].rstrip(".") + symbol
        else:
            return text[:pos].rstrip(".") + symbol
    else:
        return text
Present a block of text of given length. If the length of available text exceeds the requested length, truncate and intelligently append an ellipsis.
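Example calls showing the word-boundary truncation:

print(ellipsis("The quick brown fox jumps over the lazy dog", 20))
# 'The quick brown fox...' (cut at the last space before index 20)
print(ellipsis("short", 20))
# 'short' (returned unchanged)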
2,387
def _syndic_return(self, load):
    # Key names were elided in the extracted source; standard Salt job
    # payload fields are assumed.
    if any(key not in load for key in ('return', 'jid', 'id')):
        return None
    if 'load' in load:
        fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
        self.mminion.returners[fstr](load['jid'], load['load'])
    for key, item in six.iteritems(load['return']):
        ret = {'jid': load['jid'], 'id': key, 'return': item}
        if 'fun' in load:
            ret['fun'] = load['fun']
        self._return(ret)
Receive a syndic minion return and format it to look like returns from individual minions.
2,388
def getSequence(title, db='nt'):
    titleId = title.split(' ', 1)[0]
    try:
        gi = titleId.split('|')[1]
    except IndexError:
        # Assume the title is already a bare id.
        gi = titleId
    # The Entrez fetch was lost in extraction; the call below is a
    # reconstruction consistent with the trailing client.close()/record
    # and the docstring.
    client = Entrez.efetch(db=db, rettype='gb', retmode='text', id=gi)
    record = SeqIO.read(client, 'gb')
    client.close()
    return record
Get information about a sequence from Genbank. @param title: A C{str} sequence title from a BLAST hit. Of the form 'gi|63148399|gb|DQ011818.1| Description...'. @param db: The C{str} name of the Entrez database to consult. NOTE: this uses the network! Also, there is a 3 requests/second limit imposed by NCBI on these requests so be careful or your IP will be banned.
2,389
def uptime(human_readable=False):
    startup_time = datetime.datetime.fromtimestamp(psutil.boot_time())
    uptime = datetime.datetime.now() - startup_time
    return six.text_type(uptime) if human_readable else uptime.total_seconds()
.. versionadded:: 2015.8.0 Return the system uptime for the machine Args: human_readable (bool): Return uptime in human readable format if ``True``, otherwise return seconds. Default is ``False`` .. note:: Human readable format is ``days, hours:min:sec``. Days will only be displayed if more than 0 Returns: str: The uptime in seconds or human readable format depending on the value of ``human_readable`` CLI Example: .. code-block:: bash salt '*' status.uptime salt '*' status.uptime human_readable=True
2,390
def p_try_statement_2(self, p):
    p[0] = ast.Try(statements=p[2], fin=p[3])
try_statement : TRY block finally
2,391
def get_queryset(self, *args, **kwargs):
    select_sql = {}
    encrypted_fields = []
    for f in self.model._meta.get_fields_with_model():
        field = f[0]
        if isinstance(field, PGPMixin):
            select_sql[field.name] = self.get_decrypt_sql(field).format(
                field.model._meta.db_table,
                field.name,
                settings.PGPFIELDS_PRIVATE_KEY,
            )
            encrypted_fields.append(field.name)
    return super(PGPEncryptedManager, self).get_queryset(
        *args, **kwargs).defer(*encrypted_fields).extra(select=select_sql)
Django queryset.extra() is used here to add decryption sql to query.
2,392
def is_country(self, text):
    ct_list = self._just_cts.keys()
    if text in ct_list:
        return True
    else:
        return False
Check if a piece of text is in the list of countries
2,393
def invalidate_cache(user, size=None):
    sizes = set(settings.AVATAR_AUTO_GENERATE_SIZES)
    if size is not None:
        sizes.add(size)
    for prefix in cached_funcs:
        for size in sizes:
            cache.delete(get_cache_key(user, size, prefix))
Function to be called when saving or changing a user's avatars.
2,394
def chimera_layout(G, scale=1., center=None, dim=2):
    if not isinstance(G, nx.Graph):
        empty_graph = nx.Graph()
        empty_graph.add_edges_from(G)
        G = empty_graph

    # Graph-attribute keys were elided in the extracted source; the names
    # below follow dwave_networkx's chimera graph attributes.
    if G.graph.get("family") == "chimera":
        m = G.graph['rows']
        n = G.graph['columns']
        t = G.graph['tile']
        xy_coords = chimera_node_placer_2d(m, n, t, scale, center, dim)
        if G.graph.get('labels') == 'coordinate':
            pos = {v: xy_coords(*v) for v in G.nodes()}
        elif G.graph.get('data'):
            pos = {v: xy_coords(*dat['chimera_index'])
                   for v, dat in G.nodes(data=True)}
        else:
            coord = chimera_coordinates(m, n, t)
            pos = {v: xy_coords(*coord.tuple(v)) for v in G.nodes()}
    else:
        # best-effort placement for subgraphs of chimera graphs
        if all('chimera_index' in dat for __, dat in G.nodes(data=True)):
            chimera_indices = {v: dat['chimera_index']
                               for v, dat in G.nodes(data=True)}
        else:
            chimera_indices = find_chimera_indices(G)
        m = max(idx[0] for idx in itervalues(chimera_indices)) + 1
        n = max(idx[1] for idx in itervalues(chimera_indices)) + 1
        t = max(idx[3] for idx in itervalues(chimera_indices)) + 1
        xy_coords = chimera_node_placer_2d(m, n, t, scale, center, dim)
        pos = {v: xy_coords(i, j, u, k)
               for v, (i, j, u, k) in iteritems(chimera_indices)}
    return pos
Positions the nodes of graph G in a Chimera cross topology. NumPy (http://scipy.org) is required for this function. Parameters ---------- G : NetworkX graph Should be a Chimera graph or a subgraph of a Chimera graph. If every node in G has a `chimera_index` attribute, those are used to place the nodes. Otherwise makes a best-effort attempt to find positions. scale : float (default 1.) Scale factor. When scale = 1, all positions fit within [0, 1] on the x-axis and [-1, 0] on the y-axis. center : None or array (default None) Coordinates of the top left corner. dim : int (default 2) Number of dimensions. When dim > 2, all extra dimensions are set to 0. Returns ------- pos : dict A dictionary of positions keyed by node. Examples -------- >>> G = dnx.chimera_graph(1) >>> pos = dnx.chimera_layout(G)
2,395
def process_macros(self, content, source=None):
    macro_options = {'relative': self.relative, 'linenos': self.linenos}
    classes = []
    for macro_class in self.macros:
        try:
            macro = macro_class(logger=self.logger, embed=self.embed,
                                options=macro_options)
            content, add_classes = macro.process(content, source)
            if add_classes:
                classes += add_classes
        except Exception as e:
            self.log(u"%s processing failed in %s: %s"
                     % (macro, source, e))
    return content, classes
Process all macros.
2,396
def share_item(self, token, item_id, dest_folder_id):
    # Parameter keys and the route name were elided in the extracted
    # source; the values below are assumed from the Midas REST API.
    parameters = dict()
    parameters['token'] = token
    parameters['id'] = item_id
    parameters['dstfolderid'] = dest_folder_id
    response = self.request('midas.item.share', parameters)
    return response
Share an item to the destination folder. :param token: A valid token for the user in question. :type token: string :param item_id: The id of the item to be shared. :type item_id: int | long :param dest_folder_id: The id of destination folder where the item is shared to. :type dest_folder_id: int | long :returns: Dictionary containing the details of the shared item. :rtype: dict
2,397
def _compute_slices(self, start_idx, end_idx, assets):
    return _compute_row_slices(
        self._first_rows,
        self._last_rows,
        self._calendar_offsets,
        start_idx,
        end_idx,
        assets,
    )
Compute the raw row indices to load for each asset on a query for the given dates after applying a shift. Parameters ---------- start_idx : int Index of first date for which we want data. end_idx : int Index of last date for which we want data. assets : pandas.Int64Index Assets for which we want to compute row indices Returns ------- A 3-tuple of (first_rows, last_rows, offsets): first_rows : np.array[intp] Array with length == len(assets) containing the index of the first row to load for each asset in `assets`. last_rows : np.array[intp] Array with length == len(assets) containing the index of the last row to load for each asset in `assets`. offset : np.array[intp] Array with length == (len(asset) containing the index in a buffer of length `dates` corresponding to the first row of each asset. The value of offset[i] will be 0 if asset[i] existed at the start of a query. Otherwise, offset[i] will be equal to the number of entries in `dates` for which the asset did not yet exist.
2,398
def build(self, builder):
    builder.start("Question", {})
    for translation in self.translations:
        translation.build(builder)
    builder.end("Question")
Build XML by appending to builder .. note:: Questions can contain translations
2,399
def parse_image_response(self, response):
    # Header names and separator literals were elided in the extracted
    # source; standard MIME conventions are assumed.
    if 'xml' in response.headers.get('Content-Type'):
        # Got an XML response, likely an error code.
        xml = xmltodict.parse(response.text)
        self.analyze_reply_code(xml_response_dict=xml)
    multi_parts = self._get_multiparts(response)
    parsed = []
    for part in multi_parts:
        clean_part = part.strip()
        if '\r\n\r\n' in clean_part:
            header, body = clean_part.split('\r\n\r\n', 1)
        else:
            header = clean_part
            body = None
        part_header_dict = {k.strip(): v.strip()
                            for k, v in (h.split(':', 1)
                                         for h in header.split('\r\n'))}
        if 'xml' in part_header_dict.get('Content-Type'):
            # An XML part, likely an error code for this object.
            body = body[:body.index('\r\n') + 2] if '\r\n' in body else body
            xml = xmltodict.parse(body)
            try:
                self.analyze_reply_code(xml_response_dict=xml)
            except RETSException as e:
                if e.reply_code == '20403':  # "no object found"; code assumed
                    continue
                raise e
        if body:
            obj = self._response_object_from_header(
                obj_head_dict=part_header_dict,
                content=body.encode() if six.PY3 else body)
        else:
            obj = self._response_object_from_header(
                obj_head_dict=part_header_dict)
        parsed.append(obj)
    return parsed
Parse multiple objects from the RETS feed. A lot of string methods are used to handle the response before encoding it back into bytes for the object. :param response: The response from the feed :return: list of SingleObjectParser