Columns: code (string, lengths 75 to 104k) and docstring (string, lengths 1 to 46.9k). Each record below gives a function's source code followed by its docstring.
def GetChildrenByPriority(self, allow_external=True):
    """Generator that yields active filestore children in priority order."""
    for child in sorted(self.OpenChildren(), key=lambda x: x.PRIORITY):
        if not allow_external and child.EXTERNAL:
            continue
        if child.Get(child.Schema.ACTIVE):
            yield child
Generator that yields active filestore children in priority order.
def freeze_parameter(self, name):
    """
    Freeze a parameter by name

    Args:
        name: The name of the parameter

    """
    i = self.get_parameter_names(include_frozen=True).index(name)
    self.unfrozen_mask[i] = False
Freeze a parameter by name Args: name: The name of the parameter
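A minimal usage sketch, assuming a hypothetical `model` object that exposes the same `get_parameter_names(include_frozen=True)` list and boolean `unfrozen_mask` array used by the method above (neither is defined in this record):

    names = model.get_parameter_names(include_frozen=True)   # e.g. ["amplitude", "length_scale", "noise"]
    model.freeze_parameter("noise")
    # The mask entry at the matching index is now False, so the parameter is
    # excluded from the set of free (unfrozen) parameters.
    assert not model.unfrozen_mask[names.index("noise")]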
def add_row(self, id_):
    """Add a new row to the pattern.

    :param id_: the id of the row
    """
    row = self._parser.new_row(id_)
    self._rows.append(row)
    return row
Add a new row to the pattern. :param id_: the id of the row
def boundary_maximum_linear(graph, xxx_todo_changeme1): r""" Boundary term processing adjacent voxels maximum value using a linear relationship. An implementation of a boundary term, suitable to be used with the `~medpy.graphcut.generate.graph_from_voxels` function. The same as `boundary_difference_linear`, but working on the gradient image instead of the original. See there for details. Parameters ---------- graph : GCGraph The graph to add the weights to. gradient_image : ndarray The gradient image. spacing : sequence of float or False A sequence containing the slice spacing used for weighting the computed neighbourhood weight value for different dimensions. If `False`, no distance based weighting of the graph edges is performed. Notes ----- This function requires the gradient image to be passed along. That means that `~medpy.graphcut.generate.graph_from_voxels` has to be called with ``boundary_term_args`` set to the gradient image. """ (gradient_image, spacing) = xxx_todo_changeme1 gradient_image = scipy.asarray(gradient_image) # compute maximum intensity to encounter max_intensity = float(numpy.abs(gradient_image).max()) def boundary_term_linear(intensities): """ Implementation of a linear boundary term computation over an array. """ # normalize the intensity distances to the interval (0, 1] intensities /= max_intensity #difference_to_neighbour[difference_to_neighbour > 1] = 1 # this line should not be required, but might be due to rounding errors intensities = (1. - intensities) # reverse weights such that high intensity difference lead to small weights and hence more likely to a cut at this edge intensities[intensities == 0.] = sys.float_info.min # required to avoid zero values return intensities __skeleton_maximum(graph, gradient_image, boundary_term_linear, spacing)
r""" Boundary term processing adjacent voxels maximum value using a linear relationship. An implementation of a boundary term, suitable to be used with the `~medpy.graphcut.generate.graph_from_voxels` function. The same as `boundary_difference_linear`, but working on the gradient image instead of the original. See there for details. Parameters ---------- graph : GCGraph The graph to add the weights to. gradient_image : ndarray The gradient image. spacing : sequence of float or False A sequence containing the slice spacing used for weighting the computed neighbourhood weight value for different dimensions. If `False`, no distance based weighting of the graph edges is performed. Notes ----- This function requires the gradient image to be passed along. That means that `~medpy.graphcut.generate.graph_from_voxels` has to be called with ``boundary_term_args`` set to the gradient image.
def namedb_get_name_from_name_hash128(cur, name_hash128, block_number):
    """
    Given the hexlified 128-bit hash of a name, get the name.
    """
    unexpired_query, unexpired_args = namedb_select_where_unexpired_names(block_number)

    select_query = "SELECT name FROM name_records JOIN namespaces ON name_records.namespace_id = namespaces.namespace_id " + \
                   "WHERE name_hash128 = ? AND revoked = 0 AND " + unexpired_query + ";"
    args = (name_hash128,) + unexpired_args

    name_rows = namedb_query_execute(cur, select_query, args)
    name_row = name_rows.fetchone()
    if name_row is None:
        # no such name
        return None

    return name_row['name']
Given the hexlified 128-bit hash of a name, get the name.
def append_value_continuation(self, linenum, indent, continuation):
    """
    :param linenum: The line number of the frame.
    :type linenum: int
    :param indent: The indentation level of the frame.
    :type indent: int
    :param continuation:
    :type continuation: str
    """
    frame = self.current_frame()
    assert isinstance(frame, FieldFrame) or isinstance(frame, ValueContinuationFrame)
    if isinstance(frame, FieldFrame):
        assert frame.indent < indent and frame.container.contains(ROOT_PATH, frame.field_name)
    if isinstance(frame, ValueContinuationFrame):
        assert frame.indent == indent and frame.container.contains(ROOT_PATH, frame.field_name)
    self.pop_frame()
    field_value = frame.field_value + '\n' + continuation
    frame.container.put_field(ROOT_PATH, frame.field_name, field_value)
    frame = ValueContinuationFrame(linenum, indent, frame.path, frame.container,
                                   frame.field_name, field_value)
    self.push_frame(frame)
:param linenum: The line number of the frame. :type linenum: int :param indent: The indentation level of the frame. :type indent: int :param continuation: :type continuation: str
def _distance(self):
    """Compute the distance function d(f,g,\pi), Eq. (3)"""
    return np.average(self.min_kl, weights=self.f.weights)
Compute the distance function d(f,g,\pi), Eq. (3)
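A plausible reading of the quantity, assuming `self.min_kl` holds the per-component minimal KL divergences and `self.f.weights` the component weights of the mixture f (both are assumptions, not stated in this record): the weighted average

    d(f, g, \pi) = \frac{\sum_i w_i \, \mathrm{KL}^{\mathrm{min}}_i}{\sum_i w_i},

which is exactly what `np.average(self.min_kl, weights=self.f.weights)` evaluates.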
def match(self, node, results=None):
    """Override match() to insist on a leaf node."""
    if not isinstance(node, Leaf):
        return False
    return BasePattern.match(self, node, results)
Override match() to insist on a leaf node.
def get_download_url(self, instance, default=None):
    """Calculate the download url
    """
    download = default
    # calculate the download url
    download = "{url}/@@download/{fieldname}/{filename}".format(
        url=api.get_url(instance),
        fieldname=self.get_field_name(),
        filename=self.get_filename(instance),
    )
    return download
Calculate the download url
def fromMimeData(self, data):
    """
    Paste the clipboard data at the current cursor position.

    This method also adds another undo-object to the undo-stack.

    .. note:: This method forcefully interrupts the ``QsciInternal``
        pasting mechanism by returning an empty MIME data element.
        This is not an elegant implementation, but the best I could
        come up with at the moment.
    """
    # Only insert the element if it is available in plain text.
    if data.hasText():
        self.insert(data.text())

    # Tell the underlying QsciScintilla object that the MIME data
    # object was indeed empty.
    return (QtCore.QByteArray(), False)
Paste the clipboard data at the current cursor position. This method also adds another undo-object to the undo-stack. .. note:: This method forcefully interrupts the ``QsciInternal`` pasting mechanism by returning an empty MIME data element. This is not an elegant implementation, but the best I could come up with at the moment.
def heartbeat(self):
    """Heartbeat request to keep session alive.
    """
    unique_id = self.new_unique_id()
    message = {
        'op': 'heartbeat',
        'id': unique_id,
    }
    self._send(message)
    return unique_id
Heartbeat request to keep session alive.
def space_acl(args):
    ''' Retrieve access control list for a workspace'''
    r = fapi.get_workspace_acl(args.project, args.workspace)
    fapi._check_response_code(r, 200)
    result = dict()
    for user, info in sorted(r.json()['acl'].items()):
        result[user] = info['accessLevel']
    return result
Retrieve access control list for a workspace
def ruamelindex(self, strictindex):
    """
    Get the ruamel equivalent of a strict parsed index.

    E.g. 0 -> 0, 1 -> 2, parsed-via-slugify -> Parsed via slugify
    """
    return (
        self.key_association.get(strictindex, strictindex)
        if self.is_mapping()
        else strictindex
    )
Get the ruamel equivalent of a strict parsed index. E.g. 0 -> 0, 1 -> 2, parsed-via-slugify -> Parsed via slugify
def send_packet(self, pk, expected_reply=(), resend=False, timeout=0.2): """ Send a packet through the link interface. pk -- Packet to send expect_answer -- True if a packet from the Crazyflie is expected to be sent back, otherwise false """ self._send_lock.acquire() if self.link is not None: if len(expected_reply) > 0 and not resend and \ self.link.needs_resending: pattern = (pk.header,) + expected_reply logger.debug( 'Sending packet and expecting the %s pattern back', pattern) new_timer = Timer(timeout, lambda: self._no_answer_do_retry(pk, pattern)) self._answer_patterns[pattern] = new_timer new_timer.start() elif resend: # Check if we have gotten an answer, if not try again pattern = expected_reply if pattern in self._answer_patterns: logger.debug('We want to resend and the pattern is there') if self._answer_patterns[pattern]: new_timer = Timer(timeout, lambda: self._no_answer_do_retry( pk, pattern)) self._answer_patterns[pattern] = new_timer new_timer.start() else: logger.debug('Resend requested, but no pattern found: %s', self._answer_patterns) self.link.send_packet(pk) self.packet_sent.call(pk) self._send_lock.release()
Send a packet through the link interface. pk -- Packet to send expect_answer -- True if a packet from the Crazyflie is expected to be sent back, otherwise false
def _maybeCleanSessions(self):
    """
    Clean expired sessions if it's been long enough since the last clean.
    """
    sinceLast = self._clock.seconds() - self._lastClean
    if sinceLast > self.sessionCleanFrequency:
        self._cleanSessions()
Clean expired sessions if it's been long enough since the last clean.
def _get_best_effort_ndims(x,
                           expect_ndims=None,
                           expect_ndims_at_least=None,
                           expect_ndims_no_more_than=None):
    """Get static ndims if possible.  Fallback on `tf.rank(x)`."""
    ndims_static = _get_static_ndims(
        x,
        expect_ndims=expect_ndims,
        expect_ndims_at_least=expect_ndims_at_least,
        expect_ndims_no_more_than=expect_ndims_no_more_than)
    if ndims_static is not None:
        return ndims_static
    return tf.rank(x)
Get static ndims if possible. Fallback on `tf.rank(x)`.
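A short illustration of the static-versus-dynamic split, assuming TensorFlow 2.x is available; `_get_static_ndims` is the helper referenced above and is not reimplemented here:

    import tensorflow as tf

    x = tf.zeros([3, 4])
    # The static shape is known here, so a helper like the one above can return
    # the plain Python int 2 without adding an op to the graph.
    print(x.shape.ndims)   # 2
    # When the static rank is unknown, tf.rank(x) is used instead and yields a
    # scalar int32 tensor whose value is only available at run time.
    print(tf.rank(x))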
def total_members_in_score_range_in(
        self, leaderboard_name, min_score, max_score):
    '''
    Retrieve the total members in a given score range from the named leaderboard.

    @param leaderboard_name Name of the leaderboard.
    @param min_score [float] Minimum score.
    @param max_score [float] Maximum score.
    @return the total members in a given score range from the named leaderboard.
    '''
    return self.redis_connection.zcount(
        leaderboard_name, min_score, max_score)
Retrieve the total members in a given score range from the named leaderboard. @param leaderboard_name Name of the leaderboard. @param min_score [float] Minimum score. @param max_score [float] Maximum score. @return the total members in a given score range from the named leaderboard.
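The method is a thin wrapper around Redis's ZCOUNT command; a hedged sketch of the equivalent raw call (the leaderboard name and score bounds are made up for illustration):

    import redis

    r = redis.StrictRedis()
    # Count members whose score lies in the inclusive range [10.0, 100.0].
    total = r.zcount("weekly_leaderboard", 10.0, 100.0)
    print(total)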
def delayed_redraw(self):
    """Handle delayed redrawing of the canvas."""
    # This is the optimized redraw method
    with self._defer_lock:
        # pick up the lowest necessary level of redrawing
        whence = self._defer_whence
        self._defer_whence = self._defer_whence_reset
        flag = self._defer_flag
        self._defer_flag = False

    if flag:
        # If a redraw was scheduled, do it now
        self.redraw_now(whence=whence)
Handle delayed redrawing of the canvas.
def apply_transform(self, matrix): """ Transform mesh by a homogenous transformation matrix. Does the bookkeeping to avoid recomputing things so this function should be used rather than directly modifying self.vertices if possible. Parameters ---------- matrix : (4, 4) float Homogenous transformation matrix """ # get c-order float64 matrix matrix = np.asanyarray(matrix, order='C', dtype=np.float64) # only support homogenous transformations if matrix.shape != (4, 4): raise ValueError('Transformation matrix must be (4,4)!') # exit early if we've been passed an identity matrix # np.allclose is surprisingly slow so do this test elif np.abs(matrix - np.eye(4)).max() < 1e-8: log.debug('apply_tranform passed identity matrix') return # new vertex positions new_vertices = transformations.transform_points( self.vertices, matrix=matrix) # overridden center of mass if self._center_mass is not None: self._center_mass = transformations.transform_points( np.array([self._center_mass, ]), matrix)[0] # preserve face normals if we have them stored new_face_normals = None if 'face_normals' in self._cache: # transform face normals by rotation component new_face_normals = util.unitize( transformations.transform_points( self.face_normals, matrix=matrix, translate=False)) # preserve vertex normals if we have them stored new_vertex_normals = None if 'vertex_normals' in self._cache: new_vertex_normals = util.unitize( transformations.transform_points( self.vertex_normals, matrix=matrix, translate=False)) # a test triangle pre and post transform triangle_pre = self.vertices[self.faces[:5]] # we don't care about scale so make sure they aren't tiny triangle_pre /= np.abs(triangle_pre).max() # do the same for the post- transform test triangle_post = new_vertices[self.faces[:5]] triangle_post /= np.abs(triangle_post).max() # compute triangle normal before and after transform normal_pre, valid_pre = triangles.normals(triangle_pre) normal_post, valid_post = triangles.normals(triangle_post) # check the first few faces against normals to check winding aligned_pre = triangles.windings_aligned(triangle_pre[valid_pre], normal_pre) # windings aligned after applying transform aligned_post = triangles.windings_aligned(triangle_post[valid_post], normal_post) # convert multiple face checks to single bool, allowing outliers pre = (aligned_pre.sum() / float(len(aligned_pre))) > .6 post = (aligned_post.sum() / float(len(aligned_post))) > .6 if pre != post: log.debug('transform flips winding') # fliplr will make array non C contiguous, which will # cause hashes to be more expensive than necessary self.faces = np.ascontiguousarray(np.fliplr(self.faces)) # assign the new values self.vertices = new_vertices # may be None if we didn't have them previously self.face_normals = new_face_normals self.vertex_normals = new_vertex_normals # preserve normals and topology in cache # while dumping everything else self._cache.clear(exclude=[ 'face_normals', # transformed by us 'face_adjacency', # topological 'face_adjacency_edges', 'face_adjacency_unshared', 'edges', 'edges_sorted', 'edges_unique', 'edges_sparse', 'body_count', 'faces_unique_edges', 'euler_number', 'vertex_normals']) # set the cache ID with the current hash value self._cache.id_set() log.debug('mesh transformed by matrix') return self
Transform mesh by a homogenous transformation matrix. Does the bookkeeping to avoid recomputing things so this function should be used rather than directly modifying self.vertices if possible. Parameters ---------- matrix : (4, 4) float Homogenous transformation matrix
def update_browsers(self, *args, **kwargs):
    """Update the shot and the assetbrowsers

    :returns: None
    :rtype: None
    :raises: None
    """
    sel = self.prjbrws.selected_indexes(0)
    if not sel:
        return
    prjindex = sel[0]
    if not prjindex.isValid():
        prj = None
    else:
        prjitem = prjindex.internalPointer()
        prj = prjitem.internal_data()
    self.set_project_banner(prj)
    releasetype = self.get_releasetype()
    self.update_shot_browser(prj, releasetype)
    self.update_asset_browser(prj, releasetype)
Update the shot and the assetbrowsers :returns: None :rtype: None :raises: None
def train(self): """Runs one logical iteration of training. Subclasses should override ``_train()`` instead to return results. This class automatically fills the following fields in the result: `done` (bool): training is terminated. Filled only if not provided. `time_this_iter_s` (float): Time in seconds this iteration took to run. This may be overriden in order to override the system-computed time difference. `time_total_s` (float): Accumulated time in seconds for this entire experiment. `experiment_id` (str): Unique string identifier for this experiment. This id is preserved across checkpoint / restore calls. `training_iteration` (int): The index of this training iteration, e.g. call to train(). `pid` (str): The pid of the training process. `date` (str): A formatted date of when the result was processed. `timestamp` (str): A UNIX timestamp of when the result was processed. `hostname` (str): Hostname of the machine hosting the training process. `node_ip` (str): Node ip of the machine hosting the training process. Returns: A dict that describes training progress. """ start = time.time() result = self._train() assert isinstance(result, dict), "_train() needs to return a dict." # We do not modify internal state nor update this result if duplicate. if RESULT_DUPLICATE in result: return result result = result.copy() self._iteration += 1 self._iterations_since_restore += 1 if result.get(TIME_THIS_ITER_S) is not None: time_this_iter = result[TIME_THIS_ITER_S] else: time_this_iter = time.time() - start self._time_total += time_this_iter self._time_since_restore += time_this_iter result.setdefault(DONE, False) # self._timesteps_total should only be tracked if increments provided if result.get(TIMESTEPS_THIS_ITER) is not None: if self._timesteps_total is None: self._timesteps_total = 0 self._timesteps_total += result[TIMESTEPS_THIS_ITER] self._timesteps_since_restore += result[TIMESTEPS_THIS_ITER] # self._episodes_total should only be tracked if increments provided if result.get(EPISODES_THIS_ITER) is not None: if self._episodes_total is None: self._episodes_total = 0 self._episodes_total += result[EPISODES_THIS_ITER] # self._timesteps_total should not override user-provided total result.setdefault(TIMESTEPS_TOTAL, self._timesteps_total) result.setdefault(EPISODES_TOTAL, self._episodes_total) result.setdefault(TRAINING_ITERATION, self._iteration) # Provides auto-filled neg_mean_loss for avoiding regressions if result.get("mean_loss"): result.setdefault("neg_mean_loss", -result["mean_loss"]) now = datetime.today() result.update( experiment_id=self._experiment_id, date=now.strftime("%Y-%m-%d_%H-%M-%S"), timestamp=int(time.mktime(now.timetuple())), time_this_iter_s=time_this_iter, time_total_s=self._time_total, pid=os.getpid(), hostname=os.uname()[1], node_ip=self._local_ip, config=self.config, time_since_restore=self._time_since_restore, timesteps_since_restore=self._timesteps_since_restore, iterations_since_restore=self._iterations_since_restore) self._log_result(result) return result
Runs one logical iteration of training. Subclasses should override ``_train()`` instead to return results. This class automatically fills the following fields in the result: `done` (bool): training is terminated. Filled only if not provided. `time_this_iter_s` (float): Time in seconds this iteration took to run. This may be overridden in order to override the system-computed time difference. `time_total_s` (float): Accumulated time in seconds for this entire experiment. `experiment_id` (str): Unique string identifier for this experiment. This id is preserved across checkpoint / restore calls. `training_iteration` (int): The index of this training iteration, e.g. call to train(). `pid` (str): The pid of the training process. `date` (str): A formatted date of when the result was processed. `timestamp` (str): A UNIX timestamp of when the result was processed. `hostname` (str): Hostname of the machine hosting the training process. `node_ip` (str): Node ip of the machine hosting the training process. Returns: A dict that describes training progress.
def get_language(query: str) -> str:
    """Tries to work out the highlight.js language of a given file
    name or shebang. Returns an empty string if none match.
    """
    query = query.lower()

    for language in LANGUAGES:
        if query.endswith(language):
            return language

    return ''
Tries to work out the highlight.js language of a given file name or shebang. Returns an empty string if none match.
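A usage sketch, assuming `LANGUAGES` is an iterable of lowercase suffixes such as ('.py', '.rs', 'bash'); its real contents are not shown in this record:

    LANGUAGES = ('.py', '.rs', 'bash')      # assumed contents, illustration only

    print(get_language("Setup.PY"))         # '.py'  -- the query is lowercased first
    print(get_language("#!/usr/bin/bash"))  # 'bash' -- a shebang ending in a known name
    print(get_language("notes.txt"))        # ''     -- no suffix matches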
def setCheckedRecords(self, records, column=0, parent=None): """ Sets the checked items based on the inputed list of records. :param records | [<orb.Table>, ..] parent | <QTreeWidgetItem> || None """ if parent is None: for i in range(self.topLevelItemCount()): item = self.topLevelItem(i) try: has_record = item.record() in records except AttributeError: has_record = False if has_record: item.setCheckState(column, Qt.Checked) self.setCheckedRecords(records, column, item) else: for c in range(parent.childCount()): item = parent.child(c) try: has_record = item.record() in records except AttributeError: has_record = False if has_record: item.setCheckState(column, Qt.Checked) self.setCheckedRecords(records, column, item)
Sets the checked items based on the inputted list of records. :param records | [<orb.Table>, ..] parent | <QTreeWidgetItem> || None
def save(self, request): """ Saves a new comment and sends any notification emails. """ comment = self.get_comment_object() obj = comment.content_object if request.user.is_authenticated(): comment.user = request.user comment.by_author = request.user == getattr(obj, "user", None) comment.ip_address = ip_for_request(request) comment.replied_to_id = self.data.get("replied_to") # YaCms's duplicate check that also checks `replied_to_id`. lookup = { "content_type": comment.content_type, "object_pk": comment.object_pk, "user_name": comment.user_name, "user_email": comment.user_email, "user_url": comment.user_url, "replied_to_id": comment.replied_to_id, } for duplicate in self.get_comment_model().objects.filter(**lookup): if (duplicate.submit_date.date() == comment.submit_date.date() and duplicate.comment == comment.comment): return duplicate comment.save() comment_was_posted.send(sender=comment.__class__, comment=comment, request=request) notify_emails = split_addresses(settings.COMMENTS_NOTIFICATION_EMAILS) if notify_emails: subject = ugettext("New comment for: ") + str(obj) context = { "comment": comment, "comment_url": add_cache_bypass(comment.get_absolute_url()), "request": request, "obj": obj, } send_mail_template(subject, "email/comment_notification", settings.DEFAULT_FROM_EMAIL, notify_emails, context) return comment
Saves a new comment and sends any notification emails.
def publish(self, message):
    """
    Publishes the message to all subscribers of this topic

    :param message: (object), the message to be published.
    """
    message_data = self._to_data(message)
    self._encode_invoke(topic_publish_codec, message=message_data)
Publishes the message to all subscribers of this topic :param message: (object), the message to be published.
def _set_alert(self, v, load=False): """ Setter method for alert, mapped from YANG variable /rbridge_id/threshold_monitor/interface/policy/area/alert (container) If this variable is read-only (config: false) in the source YANG file, then _set_alert is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_alert() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=alert.alert, is_container='container', presence=False, yang_name="alert", rest_name="alert", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Alert configuration', u'cli-suppress-show-conf-path': None, u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-threshold-monitor', defining_module='brocade-threshold-monitor', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """alert must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=alert.alert, is_container='container', presence=False, yang_name="alert", rest_name="alert", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Alert configuration', u'cli-suppress-show-conf-path': None, u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-threshold-monitor', defining_module='brocade-threshold-monitor', yang_type='container', is_config=True)""", }) self.__alert = t if hasattr(self, '_set'): self._set()
Setter method for alert, mapped from YANG variable /rbridge_id/threshold_monitor/interface/policy/area/alert (container) If this variable is read-only (config: false) in the source YANG file, then _set_alert is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_alert() directly.
def is_pure_name(path):
    """
    Return True if path is a name and not a file path.

    Parameters
    ----------
    path : str
        Path (can be absolute, relative, etc.)

    Returns
    -------
    bool
        True if path is a name of config in config dir, not file path.
    """
    return (
        not os.path.isabs(path)
        and len(os.path.dirname(path)) == 0
        and not os.path.splitext(path)[1]
        and path != '.'
        and path != ''
    )
Return True if path is a name and not a file path. Parameters ---------- path : str Path (can be absolute, relative, etc.) Returns ------- bool True if path is a name of config in config dir, not file path.
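A few illustrative calls (the paths are chosen for illustration, not taken from this record):

    print(is_pure_name("myconfig"))       # True  -- bare name, no directory or extension
    print(is_pure_name("myconfig.yaml"))  # False -- has an extension
    print(is_pure_name("conf/myconfig"))  # False -- contains a directory component
    print(is_pure_name("/etc/myconfig"))  # False -- absolute path
    print(is_pure_name("."))              # False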
def _flatten_lane_details(runinfo):
    """Provide flattened lane information with multiplexed barcodes separated.
    """
    out = []
    for ldetail in runinfo["details"]:
        # handle controls
        if "project_name" not in ldetail and ldetail["description"] == "control":
            ldetail["project_name"] = "control"
        for i, barcode in enumerate(ldetail.get("multiplex", [{}])):
            cur = copy.deepcopy(ldetail)
            cur["name"] = "%s-%s" % (ldetail["name"], i + 1)
            cur["description"] = barcode.get("name", ldetail["description"])
            cur["bc_index"] = barcode.get("sequence", "")
            cur["project_name"] = clean_name(ldetail["project_name"])
            out.append(cur)
    return out
Provide flattened lane information with multiplexed barcodes separated.
def dbg(*objects, file=sys.stderr, flush=True, **kwargs):
    "Helper function to print to stderr and flush"
    print(*objects, file=file, flush=flush, **kwargs)
Helper function to print to stderr and flush
def GetOrderKey(self):
    """Return a tuple that can be used to sort problems into a consistent order.

    Returns:
      A list of values.
    """
    context_attributes = ['_type']
    context_attributes.extend(ExceptionWithContext.CONTEXT_PARTS)
    context_attributes.extend(self._GetExtraOrderAttributes())

    tokens = []
    for context_attribute in context_attributes:
        tokens.append(getattr(self, context_attribute, None))
    return tokens
Return a tuple that can be used to sort problems into a consistent order. Returns: A list of values.
def get_option(self, option):
    """
    Get a configuration option, trying the options attribute first
    and falling back to a Django project setting.
    """
    value = getattr(self, option, None)
    if value is not None:
        return value
    return getattr(settings, "COUNTRIES_{0}".format(option.upper()))
Get a configuration option, trying the options attribute first and falling back to a Django project setting.
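A sketch of the fallback order, using a made-up option name and a hypothetical `obj` built on the class above:

    # settings.py (illustrative)
    # COUNTRIES_FLAG_URL = "flags/{code}.gif"

    obj.flag_url = "custom/{code}.png"
    obj.get_option("flag_url")   # returns the attribute value, "custom/{code}.png"

    obj.flag_url = None
    obj.get_option("flag_url")   # falls back to settings.COUNTRIES_FLAG_URL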
def swd_write(self, output, value, nbits):
    """Writes bytes over SWD (Serial Wire Debug).

    Args:
      self (JLink): the ``JLink`` instance
      output (int): the output buffer offset to write to
      value (int): the value to write to the output buffer
      nbits (int): the number of bits needed to represent the ``output`` and
        ``value``

    Returns:
      The bit position of the response in the input buffer.
    """
    pDir = binpacker.pack(output, nbits)
    pIn = binpacker.pack(value, nbits)
    bitpos = self._dll.JLINK_SWD_StoreRaw(pDir, pIn, nbits)
    if bitpos < 0:
        raise errors.JLinkException(bitpos)
    return bitpos
Writes bytes over SWD (Serial Wire Debug). Args: self (JLink): the ``JLink`` instance output (int): the output buffer offset to write to value (int): the value to write to the output buffer nbits (int): the number of bits needed to represent the ``output`` and ``value`` Returns: The bit position of the response in the input buffer.
def operations(self, op_types=None): """Process operation stream.""" if not op_types: op_types = ['message', 'action', 'sync', 'viewlock', 'savedchapter'] while self._handle.tell() < self._eof: current_time = mgz.util.convert_to_timestamp(self._time / 1000) try: operation = mgz.body.operation.parse_stream(self._handle) except (ConstructError, ValueError): raise MgzError('failed to parse body operation') if operation.type == 'action': if operation.action.type in ACTIONS_WITH_PLAYER_ID: counter = self._actions_by_player[operation.action.player_id] counter.update([operation.action.type]) else: self._actions_without_player.update([operation.action.type]) if operation.type == 'action' and isinstance(operation.action.type, int): print(operation.action) if operation.type == 'sync': self._time += operation.time_increment if operation.type == 'action' and operation.action.type == 'postgame': self._postgame = operation if operation.type == 'action': action = Action(operation, current_time) self._parse_action(action, current_time) if operation.type == 'savedchapter': # fix: Don't load messages we already saw in header or prev saved chapters self._parse_lobby_chat(operation.lobby.messages, 'save', current_time) if operation.type == 'viewlock': if operation.type in op_types: yield Viewlock(operation) elif operation.type == 'action' and operation.action.type != 'postgame': if operation.type in op_types: yield Action(operation, current_time) elif ((operation.type == 'message' or operation.type == 'embedded') and operation.subtype == 'chat'): chat = ChatMessage(operation.data.text, current_time, self._players(), self._diplomacy['type'], 'game') self._parse_chat(chat) if operation.type in op_types: yield chat
Process operation stream.
def top(num_processes=5, interval=3): ''' Return a list of top CPU consuming processes during the interval. num_processes = return the top N CPU consuming processes interval = the number of seconds to sample CPU usage over CLI Examples: .. code-block:: bash salt '*' ps.top salt '*' ps.top 5 10 ''' result = [] start_usage = {} for pid in psutil.pids(): try: process = psutil.Process(pid) user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue start_usage[process] = user + system time.sleep(interval) usage = set() for process, start in six.iteritems(start_usage): try: user, system = process.cpu_times() except ValueError: user, system, _, _ = process.cpu_times() except psutil.NoSuchProcess: continue now = user + system diff = now - start usage.add((diff, process)) for idx, (diff, process) in enumerate(reversed(sorted(usage))): if num_processes and idx >= num_processes: break if not _get_proc_cmdline(process): cmdline = _get_proc_name(process) else: cmdline = _get_proc_cmdline(process) info = {'cmd': cmdline, 'user': _get_proc_username(process), 'status': _get_proc_status(process), 'pid': _get_proc_pid(process), 'create_time': _get_proc_create_time(process), 'cpu': {}, 'mem': {}, } for key, value in six.iteritems(process.cpu_times()._asdict()): info['cpu'][key] = value for key, value in six.iteritems(process.memory_info()._asdict()): info['mem'][key] = value result.append(info) return result
Return a list of top CPU consuming processes during the interval. num_processes = return the top N CPU consuming processes interval = the number of seconds to sample CPU usage over CLI Examples: .. code-block:: bash salt '*' ps.top salt '*' ps.top 5 10
def check_columns(self, check_views=True): """Check if the columns in all tables are equals. Parameters ---------- check_views: bool if True, check the columns of all the tables and views, if False check only the columns of the tables Returns ------- bool True if the columns are the same False otherwise list A list with the differences """ if check_views: query = """WITH table_list AS ( SELECT table_schema, table_name FROM information_schema.tables WHERE table_schema NOT IN {} AND table_schema NOT LIKE 'pg\_%' ORDER BY table_schema,table_name ) SELECT isc.table_schema, isc.table_name, column_name, column_default, is_nullable, data_type, character_maximum_length::text, numeric_precision::text, numeric_precision_radix::text, datetime_precision::text FROM information_schema.columns isc, table_list tl WHERE isc.table_schema = tl.table_schema AND isc.table_name = tl.table_name ORDER BY isc.table_schema, isc.table_name, column_name """.format(self.exclude_schema) else: query = """WITH table_list AS ( SELECT table_schema, table_name FROM information_schema.tables WHERE table_schema NOT IN {} AND table_schema NOT LIKE 'pg\_%' AND table_type NOT LIKE 'VIEW' ORDER BY table_schema,table_name ) SELECT isc.table_schema, isc.table_name, column_name, column_default, is_nullable, data_type, character_maximum_length::text, numeric_precision::text, numeric_precision_radix::text, datetime_precision::text FROM information_schema.columns isc, table_list tl WHERE isc.table_schema = tl.table_schema AND isc.table_name = tl.table_name ORDER BY isc.table_schema, isc.table_name, column_name """.format(self.exclude_schema) return self.__check_equals(query)
Check if the columns in all tables are equal. Parameters ---------- check_views: bool if True, check the columns of all the tables and views, if False check only the columns of the tables Returns ------- bool True if the columns are the same False otherwise list A list with the differences
def _parse_bands(self, band_input): """ Parses class input and verifies band names. :param band_input: input parameter `bands` :type band_input: str or list(str) or None :return: verified list of bands :rtype: list(str) """ all_bands = AwsConstants.S2_L1C_BANDS if self.data_source is DataSource.SENTINEL2_L1C else \ AwsConstants.S2_L2A_BANDS if band_input is None: return all_bands if isinstance(band_input, str): band_list = band_input.split(',') elif isinstance(band_input, list): band_list = band_input.copy() else: raise ValueError('bands parameter must be a list or a string') band_list = [band.strip().split('.')[0] for band in band_list] band_list = [band for band in band_list if band != ''] if not set(band_list) <= set(all_bands): raise ValueError('bands {} must be a subset of {}'.format(band_list, all_bands)) return band_list
Parses class input and verifies band names. :param band_input: input parameter `bands` :type band_input: str or list(str) or None :return: verified list of bands :rtype: list(str)
def tril(array, k=0):
    '''Lower triangle of an array.

    Return a copy of an array with elements above the k-th diagonal zeroed.

    Need a multi-dimensional version here because numpy.tril does not
    broadcast for numpy version < 1.9.'''
    try:
        tril_array = np.tril(array, k=k)
    except:
        # have to loop
        tril_array = np.zeros_like(array)
        shape = array.shape
        otherdims = shape[:-2]
        for index in np.ndindex(otherdims):
            tril_array[index] = np.tril(array[index], k=k)

    return tril_array
Lower triangle of an array. Return a copy of an array with elements above the k-th diagonal zeroed. Need a multi-dimensional version here because numpy.tril does not broadcast for numpy version < 1.9.
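A quick check of the behavior on a small stacked array (shapes chosen for illustration); on recent NumPy, where np.tril already broadcasts, both paths agree:

    import numpy as np

    a = np.arange(2 * 3 * 3, dtype=float).reshape(2, 3, 3)
    t = tril(a)                          # lower triangle of each 3x3 slice
    assert np.allclose(t, np.tril(a))    # np.tril broadcasts on NumPy >= 1.9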
def encrypt(self, wif):
    """ Encrypt the content according to BIP38

        :param str wif: Unencrypted key
    """
    if not self.unlocked():
        raise WalletLocked
    return format(bip38.encrypt(str(wif), self.masterkey), "encwif")
Encrypt the content according to BIP38 :param str wif: Unencrypted key
def find_requirement(self, req, upgrade, ignore_compatibility=False): # type: (InstallRequirement, bool, bool) -> Optional[Link] """Try to find a Link matching req Expects req, an InstallRequirement and upgrade, a boolean Returns a Link if found, Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise """ all_candidates = self.find_all_candidates(req.name) # Filter out anything which doesn't match our specifier compatible_versions = set( req.specifier.filter( # We turn the version object into a str here because otherwise # when we're debundled but setuptools isn't, Python will see # packaging.version.Version and # pkg_resources._vendor.packaging.version.Version as different # types. This way we'll use a str as a common data interchange # format. If we stop using the pkg_resources provided specifier # and start using our own, we can drop the cast to str(). [str(c.version) for c in all_candidates], prereleases=( self.allow_all_prereleases if self.allow_all_prereleases else None ), ) ) applicable_candidates = [ # Again, converting to str to deal with debundling. c for c in all_candidates if str(c.version) in compatible_versions ] if applicable_candidates: best_candidate = max(applicable_candidates, key=self._candidate_sort_key) else: best_candidate = None if req.satisfied_by is not None: installed_version = parse_version(req.satisfied_by.version) else: installed_version = None if installed_version is None and best_candidate is None: logger.critical( 'Could not find a version that satisfies the requirement %s ' '(from versions: %s)', req, ', '.join( sorted( {str(c.version) for c in all_candidates}, key=parse_version, ) ) ) raise DistributionNotFound( 'No matching distribution found for %s' % req ) best_installed = False if installed_version and ( best_candidate is None or best_candidate.version <= installed_version): best_installed = True if not upgrade and installed_version is not None: if best_installed: logger.debug( 'Existing installed version (%s) is most up-to-date and ' 'satisfies requirement', installed_version, ) else: logger.debug( 'Existing installed version (%s) satisfies requirement ' '(most up-to-date version is %s)', installed_version, best_candidate.version, ) return None if best_installed: # We have an existing version, and its the best version logger.debug( 'Installed version (%s) is most up-to-date (past versions: ' '%s)', installed_version, ', '.join(sorted(compatible_versions, key=parse_version)) or "none", ) raise BestVersionAlreadyInstalled logger.debug( 'Using version %s (newest of versions: %s)', best_candidate.version, ', '.join(sorted(compatible_versions, key=parse_version)) ) return best_candidate.location
Try to find a Link matching req Expects req, an InstallRequirement and upgrade, a boolean Returns a Link if found, Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise
def select_edges_by(docgraph, layer=None, edge_type=None, data=False): """ get all edges with the given edge type and layer. Parameters ---------- docgraph : DiscourseDocumentGraph document graph from which the nodes will be extracted layer : str name of the layer edge_type : str Type of the edges to be extracted (Edge types are defined in the Enum ``EdgeTypes``). data : bool If True, results will include edge attributes. Returns ------- edges : generator of str a container/list of edges (represented as (source node ID, target node ID) tuples). If data is True, edges are represented as (source node ID, target node ID, edge attribute dict) tuples. """ edge_type_eval = "edge_attribs['edge_type'] == '{}'".format(edge_type) layer_eval = "'{}' in edge_attribs['layers']".format(layer) if layer is not None: if edge_type is not None: return select_edges(docgraph, data=data, conditions=[edge_type_eval, layer_eval]) else: # filter by layer, but not by edge type return select_edges(docgraph, conditions=[layer_eval], data=data) else: # don't filter layers if edge_type is not None: # filter by edge type, but not by layer return select_edges(docgraph, data=data, conditions=[edge_type_eval]) else: # neither layer, nor edge type is filtered return docgraph.edges_iter(data=data)
get all edges with the given edge type and layer. Parameters ---------- docgraph : DiscourseDocumentGraph document graph from which the nodes will be extracted layer : str name of the layer edge_type : str Type of the edges to be extracted (Edge types are defined in the Enum ``EdgeTypes``). data : bool If True, results will include edge attributes. Returns ------- edges : generator of str a container/list of edges (represented as (source node ID, target node ID) tuples). If data is True, edges are represented as (source node ID, target node ID, edge attribute dict) tuples.
def create_nginx_config(self): """ Creates the Nginx configuration for the project """ cfg = '# nginx config for {0}\n'.format(self._project_name) if not self._shared_hosting: # user if self._user: cfg += 'user {0};\n'.format(self._user) # misc nginx config cfg += 'worker_processes 1;\nerror_log {0}-errors.log;\n\ pid {1}_ nginx.pid;\n\n'.format(os.path.join(self._log_dir, \ self._project_name), os.path.join(self._var_dir, self._project_name)) cfg += 'events {\n\tworker_connections 32;\n}\n\n' # http section cfg += 'http {\n' if self._include_mimetypes: cfg += '\tinclude mime.types;\n' cfg += '\tdefault_type application/octet-stream;\n' cfg += '\tclient_max_body_size 1G;\n' cfg += '\tproxy_max_temp_file_size 0;\n' cfg += '\tproxy_buffering off;\n' cfg += '\taccess_log {0}-access.log;\n'.format(os.path.join \ (self._log_dir, self._project_name)) cfg += '\tsendfile on;\n' cfg += '\tkeepalive_timeout 65;\n' # server section cfg += '\tserver {\n' cfg += '\t\tlisten 0.0.0.0:{0};\n'.format(self._port) if self._server_name: cfg += '\t\tserver_name {0};\n'.format(self._server_name) # location section cfg += '\t\tlocation / {\n' cfg += '\t\t\tuwsgi_pass unix:///{0}.sock;\n'.format(\ os.path.join(self._var_dir, self._project_name)) cfg += '\t\t\tinclude uwsgi_params;\n' cfg += '\t\t}\n\n' # end location # error page templates cfg += '\t\terror_page 500 502 503 504 /50x.html;\n' cfg += '\t\tlocation = /50x.html {\n' cfg += '\t\t\troot html;\n' # end error page section cfg += '\t\t}\n' # end server section cfg += '\t}\n' if not self._shared_hosting: # end http section cfg += '}\n' # create conf f = open(self._nginx_config, 'w') f.write(cfg) f.close()
Creates the Nginx configuration for the project
def __update_offset_table(self, fileobj, fmt, atom, delta, offset):
    """Update offset table in the specified atom."""
    if atom.offset > offset:
        atom.offset += delta
    fileobj.seek(atom.offset + 12)
    data = fileobj.read(atom.length - 12)
    fmt = fmt % cdata.uint_be(data[:4])
    offsets = struct.unpack(fmt, data[4:])
    offsets = [o + (0, delta)[offset < o] for o in offsets]
    fileobj.seek(atom.offset + 16)
    fileobj.write(struct.pack(fmt, *offsets))
Update offset table in the specified atom.
def on_left_click(self, event, grid, choices):
    """
    creates popup menu when user clicks on the column
    if that column is in the list of choices that get a drop-down menu.
    allows user to edit the column, but only from available values
    """
    row, col = event.GetRow(), event.GetCol()
    if col == 0 and self.grid.name != 'ages':
        default_val = self.grid.GetCellValue(row, col)
        msg = "Choose a new name for {}.\nThe new value will propagate throughout the contribution.".format(default_val)
        dia = wx.TextEntryDialog(self.grid, msg, "Rename {}".format(self.grid.name, default_val), default_val)
        res = dia.ShowModal()
        if res == wx.ID_OK:
            new_val = dia.GetValue()
            # update the contribution with new name
            self.contribution.rename_item(self.grid.name, default_val, new_val)
            # don't propagate changes if we are just assigning a new name
            # and not really renaming
            # (i.e., if a blank row was added then named)
            if default_val == '':
                self.grid.SetCellValue(row, 0, new_val)
                return
            # update the current grid with new name
            for row in range(self.grid.GetNumberRows()):
                cell_value = self.grid.GetCellValue(row, 0)
                if cell_value == default_val:
                    self.grid.SetCellValue(row, 0, new_val)
                else:
                    continue
        return
    color = self.grid.GetCellBackgroundColour(event.GetRow(), event.GetCol())
    # allow user to cherry-pick cells for editing.
    # gets selection of meta key for mac, ctrl key for pc
    if event.ControlDown() or event.MetaDown():
        row, col = event.GetRow(), event.GetCol()
        if (row, col) not in self.dispersed_selection:
            self.dispersed_selection.append((row, col))
            self.grid.SetCellBackgroundColour(row, col, 'light blue')
        else:
            self.dispersed_selection.remove((row, col))
            self.grid.SetCellBackgroundColour(row, col, color)  # 'white'
        self.grid.ForceRefresh()
        return
    if event.ShiftDown():
        # allow user to highlight multiple consecutive cells in a column
        previous_col = self.grid.GetGridCursorCol()
        previous_row = self.grid.GetGridCursorRow()
        col = event.GetCol()
        row = event.GetRow()
        if col != previous_col:
            return
        else:
            if row > previous_row:
                row_range = list(range(previous_row, row + 1))
            else:
                row_range = list(range(row, previous_row + 1))
        for r in row_range:
            self.grid.SetCellBackgroundColour(r, col, 'light blue')
            self.selection.append((r, col))
        self.grid.ForceRefresh()
        return
    selection = False
    if self.dispersed_selection:
        is_dispersed = True
        selection = self.dispersed_selection
    if self.selection:
        is_dispersed = False
        selection = self.selection
    try:
        col = event.GetCol()
        row = event.GetRow()
    except AttributeError:
        row, col = selection[0][0], selection[0][1]
    self.grid.SetGridCursor(row, col)
    if col in list(choices.keys()):
        # column should have a pop-up menu
        menu = wx.Menu()
        two_tiered = choices[col][1]
        choices = choices[col][0]
        if not two_tiered:
            # menu is one tiered
            if 'CLEAR cell of all values' not in choices:
                choices.insert(0, 'CLEAR cell of all values')
            for choice in choices:
                if not choice:
                    choice = " "  # prevents error if choice is an empty string
                menuitem = menu.Append(wx.ID_ANY, str(choice))
                self.window.Bind(wx.EVT_MENU, lambda event: self.on_select_menuitem(event, grid, row, col, selection), menuitem)
            self.show_menu(event, menu)
        else:
            # menu is two_tiered
            clear = menu.Append(-1, 'CLEAR cell of all values')
            self.window.Bind(wx.EVT_MENU, lambda event: self.on_select_menuitem(event, grid, row, col, selection), clear)
            for choice in sorted(choices.items()):
                submenu = wx.Menu()
                for item in choice[1]:
                    menuitem = submenu.Append(-1, str(item))
                    self.window.Bind(wx.EVT_MENU, lambda event: self.on_select_menuitem(event, grid, row, col, selection), menuitem)
                menu.Append(-1, choice[0], submenu)
            self.show_menu(event, menu)
    if selection:
        # re-whiten the cells that were previously highlighted
        for row, col in selection:
            self.grid.SetCellBackgroundColour(row, col, self.col_color)
    self.dispersed_selection = []
    self.selection = []
    self.grid.ForceRefresh()
Creates a popup menu when the user clicks on a column, if that column is in the list of choices that get a drop-down menu. Allows the user to edit the column, but only from the available values.
def set_insn(self, insn):
    """
    Set a new raw buffer to disassemble

    :param insn: the buffer
    :type insn: string
    """
    self.insn = insn
    self.size = len(self.insn)
Set a new raw buffer to disassemble :param insn: the buffer :type insn: string
def get_referenced_object(self): """ :rtype: core.BunqModel :raise: BunqException """ if self._BunqMeTab is not None: return self._BunqMeTab if self._BunqMeTabResultResponse is not None: return self._BunqMeTabResultResponse if self._BunqMeFundraiserResult is not None: return self._BunqMeFundraiserResult if self._Card is not None: return self._Card if self._CardDebit is not None: return self._CardDebit if self._DraftPayment is not None: return self._DraftPayment if self._FeatureAnnouncement is not None: return self._FeatureAnnouncement if self._IdealMerchantTransaction is not None: return self._IdealMerchantTransaction if self._Invoice is not None: return self._Invoice if self._ScheduledPayment is not None: return self._ScheduledPayment if self._ScheduledPaymentBatch is not None: return self._ScheduledPaymentBatch if self._ScheduledInstance is not None: return self._ScheduledInstance if self._MasterCardAction is not None: return self._MasterCardAction if self._BankSwitchServiceNetherlandsIncomingPayment is not None: return self._BankSwitchServiceNetherlandsIncomingPayment if self._Payment is not None: return self._Payment if self._PaymentBatch is not None: return self._PaymentBatch if self._RequestInquiryBatch is not None: return self._RequestInquiryBatch if self._RequestInquiry is not None: return self._RequestInquiry if self._RequestResponse is not None: return self._RequestResponse if self._RewardRecipient is not None: return self._RewardRecipient if self._RewardSender is not None: return self._RewardSender if self._ShareInviteBankInquiryBatch is not None: return self._ShareInviteBankInquiryBatch if self._ShareInviteBankInquiry is not None: return self._ShareInviteBankInquiry if self._ShareInviteBankResponse is not None: return self._ShareInviteBankResponse if self._SofortMerchantTransaction is not None: return self._SofortMerchantTransaction if self._TabResultInquiry is not None: return self._TabResultInquiry if self._TabResultResponse is not None: return self._TabResultResponse if self._TransferwiseTransfer is not None: return self._TransferwiseTransfer raise exception.BunqException(self._ERROR_NULL_FIELDS)
:rtype: core.BunqModel :raise: BunqException
def _docstring_parse(self, blocks): """Parses the XML from the specified blocks of docstrings.""" result = {} for block, docline, doclength, key in blocks: doctext = "<doc>{}</doc>".format(" ".join(block)) try: docs = ET.XML(doctext) docstart = self.parser.charindex(docline, 0, self.context) if not key in result: result[key] = [list(docs), docstart, docstart + doclength] else: #If there are docblocks separated by whitespace in the #same element we can't easily keep track of the start and #end character indices anymore. result[key][0].extend(list(docs)) except ET.ParseError: msg.warn(doctext) return result
Parses the XML from the specified blocks of docstrings.
def calls(self): """ Provides access to call overview for the given webhook. API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/webhook-calls :return: :class:`WebhookWebhooksCallProxy <contentful_management.webhook_webhooks_call_proxy.WebhookWebhooksCallProxy>` object. :rtype: contentful.webhook_webhooks_call_proxy.WebhookWebhooksCallProxy Usage: >>> webhook_webhooks_call_proxy = webhook.calls() <WebhookWebhooksCallProxy space_id="cfexampleapi" webhook_id="my_webhook"> """ return WebhookWebhooksCallProxy(self._client, self.sys['space'].id, self.sys['id'])
Provides access to call overview for the given webhook. API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/webhook-calls :return: :class:`WebhookWebhooksCallProxy <contentful_management.webhook_webhooks_call_proxy.WebhookWebhooksCallProxy>` object. :rtype: contentful.webhook_webhooks_call_proxy.WebhookWebhooksCallProxy Usage: >>> webhook_webhooks_call_proxy = webhook.calls() <WebhookWebhooksCallProxy space_id="cfexampleapi" webhook_id="my_webhook">
def write(self, data): """ write data on the OUT endpoint associated to the HID interface """ report_size = self.packet_size if self.ep_out: report_size = self.ep_out.wMaxPacketSize for _ in range(report_size - len(data)): data.append(0) self.read_sem.release() if not self.ep_out: bmRequestType = 0x21 #Host to device request of type Class of Recipient Interface bmRequest = 0x09 #Set_REPORT (HID class-specific request for transferring data over EP0) wValue = 0x200 #Issuing an OUT report wIndex = self.intf_number #mBed Board interface number for HID self.dev.ctrl_transfer(bmRequestType, bmRequest, wValue, wIndex, data) return #raise ValueError('EP_OUT endpoint is NULL') self.ep_out.write(data) #logging.debug('sent: %s', data) return
write data on the OUT endpoint associated to the HID interface
def parse(file_or_string):
    """Parse a file-like object or string.

    Args:
        file_or_string (file, str): File-like object or string.

    Returns:
        ParseResults: instance of pyparsing parse results.
    """
    from mysqlparse.grammar.sql_file import sql_file_syntax

    if hasattr(file_or_string, 'read') and hasattr(file_or_string.read, '__call__'):
        return sql_file_syntax.parseString(file_or_string.read())
    elif isinstance(file_or_string, six.string_types):
        return sql_file_syntax.parseString(file_or_string)
    else:
        raise TypeError("Expected file-like or string object, but got '{type_name}' instead.".format(
            type_name=type(file_or_string).__name__,
        ))
Parse a file-like object or string. Args: file_or_string (file, str): File-like object or string. Returns: ParseResults: instance of pyparsing parse results.
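A usage sketch with an inline statement (the SQL itself and the file name are made up for illustration):

    result = parse("ALTER TABLE users ADD COLUMN age INT NOT NULL;")
    # parse() also accepts an open file object:
    # with open("migration.sql") as f:
    #     result = parse(f)
    print(type(result))   # pyparsing ParseResults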
def get_clean_url(self):
    """Retrieve the clean, full URL - including username/password."""
    if self.needs_auth:
        self.prompt_auth()
    url = RepositoryURL(self.url.full_url)
    url.username = self.username
    url.password = self.password
    return url
Retrieve the clean, full URL - including username/password.
def make_action_list(self, item_list, **kwargs):
    ''' Generates a list of actions for sending to Elasticsearch '''
    action_list = []
    es_index = get2(kwargs, "es_index", self.es_index)
    action_type = kwargs.get("action_type", "index")
    action_settings = {'_op_type': action_type,
                       '_index': es_index}
    doc_type = kwargs.get("doc_type", self.doc_type)
    if not doc_type:
        doc_type = "unk"
    id_field = kwargs.get("id_field")
    for item in item_list:
        action = get_es_action_item(item, action_settings, doc_type, id_field)
        action_list.append(action)
    return action_list
Generates a list of actions for sending to Elasticsearch
def predict_subsequences(self, sequence_dict, peptide_lengths=None): """Given a dictionary mapping unique keys to amino acid sequences, run MHC binding predictions on all candidate epitopes extracted from sequences and return a EpitopeCollection. Parameters ---------- fasta_dictionary : dict or string Mapping of protein identifiers to protein amino acid sequences. If string then converted to dictionary. """ sequence_dict = check_sequence_dictionary(sequence_dict) peptide_lengths = self._check_peptide_lengths(peptide_lengths) # take each mutated sequence in the dataframe # and general MHC binding scores for all k-mer substrings binding_predictions = [] expected_peptides = set([]) normalized_alleles = [] for key, amino_acid_sequence in sequence_dict.items(): for l in peptide_lengths: for i in range(len(amino_acid_sequence) - l + 1): expected_peptides.add(amino_acid_sequence[i:i + l]) self._check_peptide_inputs(expected_peptides) for allele in self.alleles: # IEDB MHCII predictor expects DRA1 to be omitted. allele = normalize_allele_name(allele, omit_dra1=True) normalized_alleles.append(allele) request = self._get_iedb_request_params( amino_acid_sequence, allele) logger.info( "Calling IEDB (%s) with request %s", self.url, request) response_df = _query_iedb(request, self.url) for _, row in response_df.iterrows(): binding_predictions.append( BindingPrediction( source_sequence_name=key, offset=row['start'] - 1, allele=row['allele'], peptide=row['peptide'], affinity=row['ic50'], percentile_rank=row['rank'], prediction_method_name="iedb-" + self.prediction_method)) self._check_results( binding_predictions, alleles=normalized_alleles, peptides=expected_peptides) return BindingPredictionCollection(binding_predictions)
Given a dictionary mapping unique keys to amino acid sequences, run MHC binding predictions on all candidate epitopes extracted from sequences and return a EpitopeCollection. Parameters ---------- fasta_dictionary : dict or string Mapping of protein identifiers to protein amino acid sequences. If string then converted to dictionary.
def _glslify(r):
    """Transform a string or a n-tuple to a valid GLSL expression."""
    if isinstance(r, string_types):
        return r
    else:
        assert 2 <= len(r) <= 4
        return 'vec{}({})'.format(len(r), ', '.join(map(str, r)))
Transform a string or a n-tuple to a valid GLSL expression.
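Two illustrative calls:

    print(_glslify("a_position"))      # 'a_position'  -- strings pass through unchanged
    print(_glslify((1.0, 0.5, 0.25)))  # 'vec3(1.0, 0.5, 0.25)'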
def getFilenameSet(self):
    """
    Returns a set of profiled file names.

    Note: "file name" is used loosely here. See python documentation for
    co_filename, linecache module and PEP302. It may not be a valid
    filesystem path.
    """
    result = set(self.file_dict)
    # Ignore profiling code. __file__ does not always provide consistent
    # results with f_code.co_filename (ex: easy_install with zipped egg),
    # so inspect current frame instead.
    # XXX: assumes all of pprofile code resides in a single file.
    result.discard(inspect.currentframe().f_code.co_filename)
    return result
Returns a set of profiled file names. Note: "file name" is used loosely here. See python documentation for co_filename, linecache module and PEP302. It may not be a valid filesystem path.
def print_list(extracted_list, file=None):
    """Print the list of tuples as returned by extract_tb() or
    extract_stack() as a formatted stack trace to the given file."""
    if file is None:
        file = sys.stderr
    for filename, lineno, name, line in extracted_list:
        _print(file, '  File "%s", line %d, in %s' % (filename, lineno, name))
        if line:
            _print(file, '    %s' % line.strip())
Print the list of tuples as returned by extract_tb() or extract_stack() as a formatted stack trace to the given file.
def flows(args):
    """
    todo : add some example
    :param args:
    :return:
    """
    def flow_if_not(fun):
        # t = type(fun)
        if isinstance(fun, iterator):
            return fun
        elif isinstance(fun, type) and 'itertools' in str(fun.__class__):
            return fun
        else:
            try:
                return flow(fun)
            except AttributeError:
                # generator object has no attribute '__module__'
                return fun

    return FlowList(map(flow_if_not, args))
todo : add some example :param args: :return:
def find_copies(input_dir, exclude_list):
    """
    find files that are not templates and
    not in the exclude_list for copying from template to image
    """
    copies = []

    def copy_finder(copies, dirname):
        for obj in os.listdir(dirname):
            pathname = os.path.join(dirname, obj)
            if os.path.isdir(pathname):
                continue
            if obj in exclude_list:
                continue
            if obj.endswith('.mustache'):
                continue
            copies.append(os.path.join(dirname, obj))

    dir_visitor(
        input_dir,
        functools.partial(copy_finder, copies)
    )
    return copies
find files that are not templates and not in the exclude_list for copying from template to image
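The same selection logic can be sketched with only the standard library; this is a hedged stand-in for the `dir_visitor`-based version above (os.walk recurses into subdirectories, which the original helper may or may not do), and the argument values in the comment are purely illustrative:

import os

def find_copies_sketch(input_dir, exclude_list):
    # Collect files that are not mustache templates and not excluded.
    copies = []
    for dirname, _subdirs, files in os.walk(input_dir):
        for obj in files:
            if obj in exclude_list or obj.endswith('.mustache'):
                continue
            copies.append(os.path.join(dirname, obj))
    return copies

# Example: everything except README.md and *.mustache templates gets copied.
# find_copies_sketch('template_dir', ['README.md'])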
def user_topic_ids(user): """Retrieve the list of topics IDs a user has access to.""" if user.is_super_admin() or user.is_read_only_user(): query = sql.select([models.TOPICS]) else: query = (sql.select([models.JOINS_TOPICS_TEAMS.c.topic_id]) .select_from( models.JOINS_TOPICS_TEAMS.join( models.TOPICS, sql.and_(models.JOINS_TOPICS_TEAMS.c.topic_id == models.TOPICS.c.id, # noqa models.TOPICS.c.state == 'active')) # noqa ).where( sql.or_(models.JOINS_TOPICS_TEAMS.c.team_id.in_(user.teams_ids), # noqa models.JOINS_TOPICS_TEAMS.c.team_id.in_(user.child_teams_ids)))) # noqa rows = flask.g.db_conn.execute(query).fetchall() return [str(row[0]) for row in rows]
Retrieve the list of topics IDs a user has access to.
def solve_discrete_lyapunov(A, B, max_it=50, method="doubling"):
    r"""
    Computes the solution to the discrete lyapunov equation

    .. math::

        AXA' - X + B = 0

    :math:`X` is computed by using a doubling algorithm. In particular, we
    iterate to convergence on :math:`X_j` with the following recursions for
    :math:`j = 1, 2, \dots` starting from :math:`X_0 = B`, :math:`a_0 = A`:

    .. math::

        a_j = a_{j-1} a_{j-1}

    .. math::

        X_j = X_{j-1} + a_{j-1} X_{j-1} a_{j-1}'

    Parameters
    ----------
    A : array_like(float, ndim=2)
        An n x n matrix as described above.  We assume in order for
        convergence that the eigenvalues of A have moduli bounded by
        unity
    B : array_like(float, ndim=2)
        An n x n matrix as described above.  We assume in order for
        convergence that the eigenvalues of A have moduli bounded by
        unity
    max_it : scalar(int), optional(default=50)
        The maximum number of iterations
    method : string, optional(default="doubling")
        Describes the solution method to use.  If it is "doubling" then
        uses the doubling algorithm to solve, if it is "bartels-stewart"
        then it uses scipy's implementation of the Bartels-Stewart
        approach.

    Returns
    -------
    gamma1: array_like(float, ndim=2)
        Represents the value :math:`X`

    """
    if method == "doubling":
        A, B = list(map(np.atleast_2d, [A, B]))
        alpha0 = A
        gamma0 = B

        diff = 5
        n_its = 1

        while diff > 1e-15:
            alpha1 = alpha0.dot(alpha0)
            gamma1 = gamma0 + np.dot(alpha0.dot(gamma0), alpha0.conjugate().T)

            diff = np.max(np.abs(gamma1 - gamma0))
            alpha0 = alpha1
            gamma0 = gamma1

            n_its += 1

            if n_its > max_it:
                msg = "Exceeded maximum iterations {}, check input matrices"
                raise ValueError(msg.format(n_its))

    elif method == "bartels-stewart":
        gamma1 = sp_solve_discrete_lyapunov(A, B)

    else:
        msg = "Check your method input. Should be doubling or bartels-stewart"
        raise ValueError(msg)

    return gamma1
r""" Computes the solution to the discrete lyapunov equation .. math:: AXA' - X + B = 0 :math:`X` is computed by using a doubling algorithm. In particular, we iterate to convergence on :math:`X_j` with the following recursions for :math:`j = 1, 2, \dots` starting from :math:`X_0 = B`, :math:`a_0 = A`: .. math:: a_j = a_{j-1} a_{j-1} .. math:: X_j = X_{j-1} + a_{j-1} X_{j-1} a_{j-1}' Parameters ---------- A : array_like(float, ndim=2) An n x n matrix as described above. We assume in order for convergence that the eigenvalues of A have moduli bounded by unity B : array_like(float, ndim=2) An n x n matrix as described above. We assume in order for convergence that the eigenvalues of A have moduli bounded by unity max_it : scalar(int), optional(default=50) The maximum number of iterations method : string, optional(default="doubling") Describes the solution method to use. If it is "doubling" then uses the doubling algorithm to solve, if it is "bartels-stewart" then it uses scipy's implementation of the Bartels-Stewart approach. Returns ------- gamma1: array_like(float, ndim=2) Represents the value :math:`X`
def get_widget(self, request):
    """
    Field widget is replaced with "RestrictedSelectWidget" because we do not
    want to use modified widgets for filtering.
    """
    return self._update_widget_choices(self.field.formfield(widget=RestrictedSelectWidget).widget)
Field widget is replaced with "RestrictedSelectWidget" because we do not
want to use modified widgets for filtering.
async def jsk_vc_join(self, ctx: commands.Context, *, destination: typing.Union[discord.VoiceChannel, discord.Member] = None): """ Joins a voice channel, or moves to it if already connected. Passing a voice channel uses that voice channel. Passing a member will use that member's current voice channel. Passing nothing will use the author's voice channel. """ destination = destination or ctx.author if isinstance(destination, discord.Member): if destination.voice and destination.voice.channel: destination = destination.voice.channel else: return await ctx.send("Member has no voice channel.") voice = ctx.guild.voice_client if voice: await voice.move_to(destination) else: await destination.connect(reconnect=True) await ctx.send(f"Connected to {destination.name}.")
Joins a voice channel, or moves to it if already connected. Passing a voice channel uses that voice channel. Passing a member will use that member's current voice channel. Passing nothing will use the author's voice channel.
def add_edge(self, fr, to): """ Add an edge to the graph. Multiple edges between the same vertices will quietly be ignored. N-partite graphs can be used to permit multiple edges by partitioning the graph into vertices and edges. :param fr: The name of the origin vertex. :param to: The name of the destination vertex. :return: """ fr = self.add_vertex(fr) to = self.add_vertex(to) self.adjacency[fr].children.add(to) self.adjacency[to].parents.add(fr)
Add an edge to the graph. Multiple edges between the same vertices will quietly be ignored. N-partite graphs can be used to permit multiple edges by partitioning the graph into vertices and edges. :param fr: The name of the origin vertex. :param to: The name of the destination vertex. :return:
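A stripped-down illustration of the same bookkeeping; the `Adjacency` record and `TinyGraph` class below are made up to stand in for whatever this graph class actually stores per vertex:

from collections import namedtuple

Adjacency = namedtuple('Adjacency', ['children', 'parents'])

class TinyGraph:
    def __init__(self):
        self.adjacency = {}

    def add_vertex(self, name):
        # Creating a vertex is idempotent; returns the canonical name.
        self.adjacency.setdefault(name, Adjacency(set(), set()))
        return name

    def add_edge(self, fr, to):
        fr = self.add_vertex(fr)
        to = self.add_vertex(to)
        # Sets make duplicate edges a no-op, as in the method above.
        self.adjacency[fr].children.add(to)
        self.adjacency[to].parents.add(fr)

g = TinyGraph()
g.add_edge('a', 'b')
g.add_edge('a', 'b')              # quietly ignored
print(g.adjacency['a'].children)  # {'b'}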
def build_idx_set(branch_id, start_date): """ Builds a dictionary of keys based on the branch code """ code_set = branch_id.split("/") code_set.insert(3, "Rates") idx_set = { "sec": "/".join([code_set[0], code_set[1], "Sections"]), "mag": "/".join([code_set[0], code_set[1], code_set[2], "Magnitude"])} idx_set["rate"] = "/".join(code_set) idx_set["rake"] = "/".join([code_set[0], code_set[1], "Rake"]) idx_set["msr"] = "-".join(code_set[:3]) idx_set["geol"] = code_set[0] if start_date: # time-dependent source idx_set["grid_key"] = "_".join( branch_id.replace("/", "_").split("_")[:-1]) else: # time-independent source idx_set["grid_key"] = branch_id.replace("/", "_") idx_set["total_key"] = branch_id.replace("/", "|") return idx_set
Builds a dictionary of keys based on the branch code
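For a hypothetical branch code (the value below is made up purely to show the key layout, assuming the function above is importable), the resulting dictionary looks like this:

# Illustrative only -- 'b1/s1/m1/r1' is not a real branch identifier.
idx_set = build_idx_set("b1/s1/m1/r1", start_date=None)
# idx_set == {
#     'sec':       'b1/s1/Sections',
#     'mag':       'b1/s1/m1/Magnitude',
#     'rate':      'b1/s1/m1/Rates/r1',
#     'rake':      'b1/s1/Rake',
#     'msr':       'b1-s1-m1',
#     'geol':      'b1',
#     'grid_key':  'b1_s1_m1_r1',   # start_date falsy -> time-independent form
#     'total_key': 'b1|s1|m1|r1',
# }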
def parse(self, valstr): # type: (bytes) -> None ''' Parse an El Torito section header from a string. Parameters: valstr - The string to parse. Returns: Nothing. ''' if self._initialized: raise pycdlibexception.PyCdlibInternalError('El Torito Section Header already initialized') (self.header_indicator, self.platform_id, self.num_section_entries, self.id_string) = struct.unpack_from(self.FMT, valstr, 0) self._initialized = True
Parse an El Torito section header from a string. Parameters: valstr - The string to parse. Returns: Nothing.
def close_files(self): """Close all files with an activated disk flag.""" for name in self: if getattr(self, '_%s_diskflag' % name): file_ = getattr(self, '_%s_file' % name) file_.close()
Close all files with an activated disk flag.
def _update_counters(self, ti_status): """ Updates the counters per state of the tasks that were running. Can re-add to tasks to run in case required. :param ti_status: the internal status of the backfill job tasks :type ti_status: BackfillJob._DagRunTaskStatus """ for key, ti in list(ti_status.running.items()): ti.refresh_from_db() if ti.state == State.SUCCESS: ti_status.succeeded.add(key) self.log.debug("Task instance %s succeeded. Don't rerun.", ti) ti_status.running.pop(key) continue elif ti.state == State.SKIPPED: ti_status.skipped.add(key) self.log.debug("Task instance %s skipped. Don't rerun.", ti) ti_status.running.pop(key) continue elif ti.state == State.FAILED: self.log.error("Task instance %s failed", ti) ti_status.failed.add(key) ti_status.running.pop(key) continue # special case: if the task needs to run again put it back elif ti.state == State.UP_FOR_RETRY: self.log.warning("Task instance %s is up for retry", ti) ti_status.running.pop(key) ti_status.to_run[key] = ti # special case: if the task needs to be rescheduled put it back elif ti.state == State.UP_FOR_RESCHEDULE: self.log.warning("Task instance %s is up for reschedule", ti) ti_status.running.pop(key) ti_status.to_run[key] = ti # special case: The state of the task can be set to NONE by the task itself # when it reaches concurrency limits. It could also happen when the state # is changed externally, e.g. by clearing tasks from the ui. We need to cover # for that as otherwise those tasks would fall outside of the scope of # the backfill suddenly. elif ti.state == State.NONE: self.log.warning( "FIXME: task instance %s state was set to none externally or " "reaching concurrency limits. Re-adding task to queue.", ti ) ti.set_state(State.SCHEDULED) ti_status.running.pop(key) ti_status.to_run[key] = ti
Updates the counters per state of the tasks that were running. Can re-add to tasks to run in case required. :param ti_status: the internal status of the backfill job tasks :type ti_status: BackfillJob._DagRunTaskStatus
def to_bson_voronoi_list2(self): """ Transforms the voronoi_list into a vlist + bson_nb_voro_list, that are BSON-encodable. :return: [vlist, bson_nb_voro_list], to be used in the as_dict method """ bson_nb_voro_list2 = [None] * len(self.voronoi_list2) for ivoro, voro in enumerate(self.voronoi_list2): if voro is None or voro == 'None': continue site_voro = [] # {'site': neighbors[nn[1]], # 'angle': sa, # 'distance': distances[nn[1]], # 'index': myindex} for nb_dict in voro: site = nb_dict['site'] site_dict = {key: val for key, val in nb_dict.items() if key not in ['site']} #site_voro.append([ps.as_dict(), dd]) [float(c) for c in self.frac_coords] diff = site.frac_coords - self.structure[nb_dict['index']].frac_coords site_voro.append([[nb_dict['index'], [float(c) for c in diff]], site_dict]) bson_nb_voro_list2[ivoro] = site_voro return bson_nb_voro_list2
Transforms the voronoi_list into a vlist + bson_nb_voro_list, that are BSON-encodable. :return: [vlist, bson_nb_voro_list], to be used in the as_dict method
def sync(self):
    """
    synchronize self from Ariane server according to its id (which takes priority) or name
    :return:
    """
    LOGGER.debug("Company.sync")
    params = None
    if self.id is not None:
        params = {'id': self.id}
    elif self.name is not None:
        params = {'name': self.name}

    if params is not None:
        args = {'http_operation': 'GET', 'operation_path': 'get', 'parameters': params}
        response = CompanyService.requester.call(args)
        if response.rc != 0:
            LOGGER.warning(
                'Company.sync - Problem while syncing company (name:' + self.name +
                ', id:' + str(self.id) + '). Reason: ' + str(response.response_content) +
                '-' + str(response.error_message) + " (" + str(response.rc) + ")"
            )
        else:
            json_obj = response.response_content
            self.id = json_obj['companyID']
            self.name = json_obj['companyName']
            self.description = json_obj['companyDescription']
            self.applications_ids = json_obj['companyApplicationsID']
            self.ost_ids = json_obj['companyOSTypesID']
synchronize self from Ariane server according to its id (which takes priority) or name
:return:
def to_language(locale): """ Turns a locale name (en_US) into a language name (en-us). Taken `from Django <http://bit.ly/1vWACbE>`_. """ p = locale.find('_') if p >= 0: return locale[:p].lower() + '-' + locale[p + 1:].lower() else: return locale.lower()
Turns a locale name (en_US) into a language name (en-us). Taken `from Django <http://bit.ly/1vWACbE>`_.
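A couple of illustrative calls; the outputs follow directly from the string manipulation above:

print(to_language('en_US'))   # 'en-us'
print(to_language('pt_BR'))   # 'pt-br'
print(to_language('de'))      # 'de'  (no underscore, just lowercased)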
async def issueClaim(self, schemaId: ID, claimRequest: ClaimRequest, iA=None, i=None) -> (Claims, Dict[str, ClaimAttributeValues]): """ Issue a claim for the given user and schema. :param schemaId: The schema ID (reference to claim definition schema) :param claimRequest: A claim request containing prover ID and prover-generated values :param iA: accumulator ID :param i: claim's sequence number within accumulator :return: The claim (both primary and non-revocation) """ schemaKey = (await self.wallet.getSchema(schemaId)).getKey() attributes = self._attrRepo.getAttributes(schemaKey, claimRequest.userId) # TODO re-enable when revocation registry is implemented # iA = iA if iA else (await self.wallet.getAccumulator(schemaId)).iA # TODO this has un-obvious side-effects await self._genContxt(schemaId, iA, claimRequest.userId) (c1, claim) = await self._issuePrimaryClaim(schemaId, attributes, claimRequest.U) # TODO re-enable when revocation registry is fully implemented c2 = await self._issueNonRevocationClaim(schemaId, claimRequest.Ur, iA, i) if claimRequest.Ur else None signature = Claims(primaryClaim=c1, nonRevocClaim=c2) return (signature, claim)
Issue a claim for the given user and schema. :param schemaId: The schema ID (reference to claim definition schema) :param claimRequest: A claim request containing prover ID and prover-generated values :param iA: accumulator ID :param i: claim's sequence number within accumulator :return: The claim (both primary and non-revocation)
def swallow_stdout(stream=None): """Divert stdout into the given stream >>> string = StringIO() >>> with swallow_stdout(string): ... print('hello') >>> assert string.getvalue().rstrip() == 'hello' """ saved = sys.stdout if stream is None: stream = StringIO() sys.stdout = stream try: yield finally: sys.stdout = saved
Divert stdout into the given stream >>> string = StringIO() >>> with swallow_stdout(string): ... print('hello') >>> assert string.getvalue().rstrip() == 'hello'
def call_inputhook(self, input_is_ready_func): """ Call the inputhook. (Called by a prompt-toolkit eventloop.) """ self._input_is_ready = input_is_ready_func # Start thread that activates this pipe when there is input to process. def thread(): input_is_ready_func(wait=True) os.write(self._w, b'x') threading.Thread(target=thread).start() # Call inputhook. self.inputhook(self) # Flush the read end of the pipe. try: # Before calling 'os.read', call select.select. This is required # when the gevent monkey patch has been applied. 'os.read' is never # monkey patched and won't be cooperative, so that would block all # other select() calls otherwise. # See: http://www.gevent.org/gevent.os.html # Note: On Windows, this is apparently not an issue. # However, if we would ever want to add a select call, it # should use `windll.kernel32.WaitForMultipleObjects`, # because `select.select` can't wait for a pipe on Windows. if not is_windows(): select_fds([self._r], timeout=None) os.read(self._r, 1024) except OSError: # This happens when the window resizes and a SIGWINCH was received. # We get 'Error: [Errno 4] Interrupted system call' # Just ignore. pass self._input_is_ready = None
Call the inputhook. (Called by a prompt-toolkit eventloop.)
def get_long_short_pos(positions): """ Determines the long and short allocations in a portfolio. Parameters ---------- positions : pd.DataFrame The positions that the strategy takes over time. Returns ------- df_long_short : pd.DataFrame Long and short allocations as a decimal percentage of the total net liquidation """ pos_wo_cash = positions.drop('cash', axis=1) longs = pos_wo_cash[pos_wo_cash > 0].sum(axis=1).fillna(0) shorts = pos_wo_cash[pos_wo_cash < 0].sum(axis=1).fillna(0) cash = positions.cash net_liquidation = longs + shorts + cash df_pos = pd.DataFrame({'long': longs.divide(net_liquidation, axis='index'), 'short': shorts.divide(net_liquidation, axis='index')}) df_pos['net exposure'] = df_pos['long'] + df_pos['short'] return df_pos
Determines the long and short allocations in a portfolio. Parameters ---------- positions : pd.DataFrame The positions that the strategy takes over time. Returns ------- df_long_short : pd.DataFrame Long and short allocations as a decimal percentage of the total net liquidation
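A tiny worked example of the exposure calculation, using a hypothetical single-day positions frame and plain pandas (nothing from the surrounding codebase):

import pandas as pd

positions = pd.DataFrame(
    {'AAPL': [150.0], 'SPY': [-50.0], 'cash': [100.0]},
    index=pd.to_datetime(['2024-01-02']))

pos_wo_cash = positions.drop('cash', axis=1)
longs = pos_wo_cash[pos_wo_cash > 0].sum(axis=1).fillna(0)    # 150
shorts = pos_wo_cash[pos_wo_cash < 0].sum(axis=1).fillna(0)   # -50
net_liquidation = longs + shorts + positions.cash             # 200

print(longs / net_liquidation)   # 0.75  -> the 'long' column
print(shorts / net_liquidation)  # -0.25 -> the 'short' column
# 'net exposure' would be 0.75 + (-0.25) = 0.5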
def _decorate_urlconf(urlpatterns, decorator=require_auth, *args, **kwargs):
    '''Decorate all urlpatterns with the specified decorator'''
    if isinstance(urlpatterns, (list, tuple)):
        for pattern in urlpatterns:
            if getattr(pattern, 'callback', None):
                pattern._callback = decorator(
                    pattern.callback, *args, **kwargs)
            if getattr(pattern, 'url_patterns', []):
                _decorate_urlconf(
                    pattern.url_patterns, decorator, *args, **kwargs)
    else:
        if getattr(urlpatterns, 'callback', None):
            urlpatterns._callback = decorator(
                urlpatterns.callback, *args, **kwargs)
Decorate all urlpatterns with the specified decorator
def parse_allele_name(name, species_prefix=None): """Takes an allele name and splits it into four parts: 1) species prefix 2) gene name 3) allele family 4) allele code If species_prefix is provided, that is used instead of getting the species prefix from the name. (And in that case, a species prefix in the name will result in an error being raised.) For example, in all of the following inputs: "HLA-A*02:01" "A0201" "A00201" The result is a AlleleName object. Example: AlleleName( species="HLA", # species prefix gene="A", # gene name allele_family="02", # allele family allele_code="01", # allele code ) The logic for other species mostly resembles the naming system for humans, except for mice, rats, and swine, which have archaic nomenclature. """ original = name name = name.strip() if len(name) == 0: raise ValueError("Can't normalize empty MHC allele name") species_from_name, name = split_species_prefix(name) if species_prefix: if species_from_name: raise ValueError("If a species is passed in, we better not have another " "species in the name itself.") species = species_prefix else: species = species_from_name if species in ("H-2", "H2"): gene, allele_code = parse_mouse_allele_name("H-2-" + name) # mice don't have allele families return AlleleName("H-2", gene, "", allele_code) if len(name) == 0: raise AlleleParseError("Incomplete MHC allele name: %s" % (original,)) elif not species: # assume that a missing species name means we're dealing with a # human HLA allele if "-" in name: raise AlleleParseError("Can't parse allele name: %s" % original) species = "HLA" if name[0].upper() == "D": if len(name) == 7: # sometimes we get very compact names like DRB0101 gene, name = parse_letters(name, 3) else: # MHC class II genes like "DQA1" need to be parsed with both # letters and numbers gene, name = parse_alphanum(name, 4) # TODO: make a list of known species/gene pairs, along with # gene synonyms. That should significantly imporve on this kind of # ad-hoc synonym handling. if gene.isalpha(): # expand e.g. DRA -> DRA1, DQB -> DQB1 gene = gene + "1" elif len(name) == 5: # example: SLA-30101 gene, name = name[0], name[1:] elif name[0].isalpha(): # if there are more separators to come, then assume the gene names # can have the form "DQA1" gene, name = parse_letters(name) elif name[0].isdigit(): gene, name = parse_numbers(name) elif len(name) in (6, 7) and ("*" in name or "-" in name or ":" in name): # example: SLA-3*0101 or SLA-3*01:01 gene, name = parse_alphanum(name) _, name = parse_separator(name) else: raise AlleleParseError( "Can't parse gene name from allele: %s" % original) if len(gene) == 0: raise AlleleParseError("No MHC gene name given in %s" % original) if len(name) == 0: raise AlleleParseError("Malformed MHC type %s" % original) gene = gene.upper() # skip initial separator sep, name = parse_separator(name) if species == "SLA": if ":" in name: parts = name.split(":") if len(parts) != 2: raise AlleleParseError( "Unexpected number of ':' characters in '%s'" % original) family, name = parts elif len(name) < 2: raise AlleleParseError("Unable to parse '%s'" % original) elif name.isalpha() or len(name) == 2: # parse sequences serotypes like SLA-1-HB # as shorthand for SLA-1-HB01 family = name name = "01" else: # the family names for pigs can be weirdly complicated # such as 'w13sm' but the alleles still always # end with two digits e.g. SLA-2*w13sm20 family = name[:-2] name = name[-2:] elif len(name) == 4 or (species == "HLA" and gene in ("A", "B", "C")): # If all that's left is e.g. 
"0201" then only parse the # first two digits as the family code. Also, human Class I alleles # seem to be exceptional in that they have only 2 digit allele # families but 3 digit allele codes # (other species like sheep have 3 digits followed by 2 digits) family, name = parse_numbers(name, max_len=2) else: family, name = parse_numbers(name, max_len=3) sep, name = parse_separator(name) allele_code, rest_of_text = parse_numbers(name) rest_of_text = rest_of_text.strip() if len(rest_of_text) > 0: raise AlleleParseError("The suffix '%s' of '%s' was not parsed" % ( rest_of_text, original)) if len(family) == 1: family = "0" + family elif len(family) == 3 and family[0] == "0": family = family[1:] if len(allele_code) == 0: allele_code = "01" elif len(allele_code) == 3 and allele_code[0] == "0": # normalize HLA-A*02:001 into HLA-A*02:01 allele_code = allele_code[1:] return AlleleName(species, gene, family, allele_code)
Takes an allele name and splits it into four parts: 1) species prefix 2) gene name 3) allele family 4) allele code If species_prefix is provided, that is used instead of getting the species prefix from the name. (And in that case, a species prefix in the name will result in an error being raised.) For example, in all of the following inputs: "HLA-A*02:01" "A0201" "A00201" The result is a AlleleName object. Example: AlleleName( species="HLA", # species prefix gene="A", # gene name allele_family="02", # allele family allele_code="01", # allele code ) The logic for other species mostly resembles the naming system for humans, except for mice, rats, and swine, which have archaic nomenclature.
def create_replication_instance(ReplicationInstanceIdentifier=None, AllocatedStorage=None, ReplicationInstanceClass=None, VpcSecurityGroupIds=None, AvailabilityZone=None, ReplicationSubnetGroupIdentifier=None, PreferredMaintenanceWindow=None, MultiAZ=None, EngineVersion=None, AutoMinorVersionUpgrade=None, Tags=None, KmsKeyId=None, PubliclyAccessible=None): """ Creates the replication instance using the specified parameters. See also: AWS API Documentation :example: response = client.create_replication_instance( ReplicationInstanceIdentifier='string', AllocatedStorage=123, ReplicationInstanceClass='string', VpcSecurityGroupIds=[ 'string', ], AvailabilityZone='string', ReplicationSubnetGroupIdentifier='string', PreferredMaintenanceWindow='string', MultiAZ=True|False, EngineVersion='string', AutoMinorVersionUpgrade=True|False, Tags=[ { 'Key': 'string', 'Value': 'string' }, ], KmsKeyId='string', PubliclyAccessible=True|False ) :type ReplicationInstanceIdentifier: string :param ReplicationInstanceIdentifier: [REQUIRED] The replication instance identifier. This parameter is stored as a lowercase string. Constraints: Must contain from 1 to 63 alphanumeric characters or hyphens. First character must be a letter. Cannot end with a hyphen or contain two consecutive hyphens. Example: myrepinstance :type AllocatedStorage: integer :param AllocatedStorage: The amount of storage (in gigabytes) to be initially allocated for the replication instance. :type ReplicationInstanceClass: string :param ReplicationInstanceClass: [REQUIRED] The compute and memory capacity of the replication instance as specified by the replication instance class. Valid Values: dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge :type VpcSecurityGroupIds: list :param VpcSecurityGroupIds: Specifies the VPC security group to be used with the replication instance. The VPC security group must work with the VPC containing the replication instance. (string) -- :type AvailabilityZone: string :param AvailabilityZone: The EC2 Availability Zone that the replication instance will be created in. Default: A random, system-chosen Availability Zone in the endpoint's region. Example: us-east-1d :type ReplicationSubnetGroupIdentifier: string :param ReplicationSubnetGroupIdentifier: A subnet group to associate with the replication instance. :type PreferredMaintenanceWindow: string :param PreferredMaintenanceWindow: The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC). Format: ddd:hh24:mi-ddd:hh24:mi Default: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random day of the week. Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun Constraints: Minimum 30-minute window. :type MultiAZ: boolean :param MultiAZ: Specifies if the replication instance is a Multi-AZ deployment. You cannot set the AvailabilityZone parameter if the Multi-AZ parameter is set to true . :type EngineVersion: string :param EngineVersion: The engine version number of the replication instance. :type AutoMinorVersionUpgrade: boolean :param AutoMinorVersionUpgrade: Indicates that minor engine upgrades will be applied automatically to the replication instance during the maintenance window. Default: true :type Tags: list :param Tags: Tags to be associated with the replication instance. (dict) -- Key (string) --A key is the required name of the tag. 
The string value can be from 1 to 128 Unicode characters in length and cannot be prefixed with 'aws:' or 'dms:'. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\p{L}\p{Z}\p{N}_.:/=+\-]*)$'). Value (string) --A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and cannot be prefixed with 'aws:' or 'dms:'. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\p{L}\p{Z}\p{N}_.:/=+\-]*)$'). :type KmsKeyId: string :param KmsKeyId: The KMS key identifier that will be used to encrypt the content on the replication instance. If you do not specify a value for the KmsKeyId parameter, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. :type PubliclyAccessible: boolean :param PubliclyAccessible: Specifies the accessibility options for the replication instance. A value of true represents an instance with a public IP address. A value of false represents an instance with a private IP address. The default value is true . :rtype: dict :return: { 'ReplicationInstance': { 'ReplicationInstanceIdentifier': 'string', 'ReplicationInstanceClass': 'string', 'ReplicationInstanceStatus': 'string', 'AllocatedStorage': 123, 'InstanceCreateTime': datetime(2015, 1, 1), 'VpcSecurityGroups': [ { 'VpcSecurityGroupId': 'string', 'Status': 'string' }, ], 'AvailabilityZone': 'string', 'ReplicationSubnetGroup': { 'ReplicationSubnetGroupIdentifier': 'string', 'ReplicationSubnetGroupDescription': 'string', 'VpcId': 'string', 'SubnetGroupStatus': 'string', 'Subnets': [ { 'SubnetIdentifier': 'string', 'SubnetAvailabilityZone': { 'Name': 'string' }, 'SubnetStatus': 'string' }, ] }, 'PreferredMaintenanceWindow': 'string', 'PendingModifiedValues': { 'ReplicationInstanceClass': 'string', 'AllocatedStorage': 123, 'MultiAZ': True|False, 'EngineVersion': 'string' }, 'MultiAZ': True|False, 'EngineVersion': 'string', 'AutoMinorVersionUpgrade': True|False, 'KmsKeyId': 'string', 'ReplicationInstanceArn': 'string', 'ReplicationInstancePublicIpAddress': 'string', 'ReplicationInstancePrivateIpAddress': 'string', 'ReplicationInstancePublicIpAddresses': [ 'string', ], 'ReplicationInstancePrivateIpAddresses': [ 'string', ], 'PubliclyAccessible': True|False, 'SecondaryAvailabilityZone': 'string' } } :returns: Must contain from 1 to 63 alphanumeric characters or hyphens. First character must be a letter. Cannot end with a hyphen or contain two consecutive hyphens. """ pass
Creates the replication instance using the specified parameters. See also: AWS API Documentation :example: response = client.create_replication_instance( ReplicationInstanceIdentifier='string', AllocatedStorage=123, ReplicationInstanceClass='string', VpcSecurityGroupIds=[ 'string', ], AvailabilityZone='string', ReplicationSubnetGroupIdentifier='string', PreferredMaintenanceWindow='string', MultiAZ=True|False, EngineVersion='string', AutoMinorVersionUpgrade=True|False, Tags=[ { 'Key': 'string', 'Value': 'string' }, ], KmsKeyId='string', PubliclyAccessible=True|False ) :type ReplicationInstanceIdentifier: string :param ReplicationInstanceIdentifier: [REQUIRED] The replication instance identifier. This parameter is stored as a lowercase string. Constraints: Must contain from 1 to 63 alphanumeric characters or hyphens. First character must be a letter. Cannot end with a hyphen or contain two consecutive hyphens. Example: myrepinstance :type AllocatedStorage: integer :param AllocatedStorage: The amount of storage (in gigabytes) to be initially allocated for the replication instance. :type ReplicationInstanceClass: string :param ReplicationInstanceClass: [REQUIRED] The compute and memory capacity of the replication instance as specified by the replication instance class. Valid Values: dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge :type VpcSecurityGroupIds: list :param VpcSecurityGroupIds: Specifies the VPC security group to be used with the replication instance. The VPC security group must work with the VPC containing the replication instance. (string) -- :type AvailabilityZone: string :param AvailabilityZone: The EC2 Availability Zone that the replication instance will be created in. Default: A random, system-chosen Availability Zone in the endpoint's region. Example: us-east-1d :type ReplicationSubnetGroupIdentifier: string :param ReplicationSubnetGroupIdentifier: A subnet group to associate with the replication instance. :type PreferredMaintenanceWindow: string :param PreferredMaintenanceWindow: The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC). Format: ddd:hh24:mi-ddd:hh24:mi Default: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random day of the week. Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun Constraints: Minimum 30-minute window. :type MultiAZ: boolean :param MultiAZ: Specifies if the replication instance is a Multi-AZ deployment. You cannot set the AvailabilityZone parameter if the Multi-AZ parameter is set to true . :type EngineVersion: string :param EngineVersion: The engine version number of the replication instance. :type AutoMinorVersionUpgrade: boolean :param AutoMinorVersionUpgrade: Indicates that minor engine upgrades will be applied automatically to the replication instance during the maintenance window. Default: true :type Tags: list :param Tags: Tags to be associated with the replication instance. (dict) -- Key (string) --A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and cannot be prefixed with 'aws:' or 'dms:'. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\p{L}\p{Z}\p{N}_.:/=+\-]*)$'). Value (string) --A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and cannot be prefixed with 'aws:' or 'dms:'. 
The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: '^([\p{L}\p{Z}\p{N}_.:/=+\-]*)$'). :type KmsKeyId: string :param KmsKeyId: The KMS key identifier that will be used to encrypt the content on the replication instance. If you do not specify a value for the KmsKeyId parameter, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. :type PubliclyAccessible: boolean :param PubliclyAccessible: Specifies the accessibility options for the replication instance. A value of true represents an instance with a public IP address. A value of false represents an instance with a private IP address. The default value is true . :rtype: dict :return: { 'ReplicationInstance': { 'ReplicationInstanceIdentifier': 'string', 'ReplicationInstanceClass': 'string', 'ReplicationInstanceStatus': 'string', 'AllocatedStorage': 123, 'InstanceCreateTime': datetime(2015, 1, 1), 'VpcSecurityGroups': [ { 'VpcSecurityGroupId': 'string', 'Status': 'string' }, ], 'AvailabilityZone': 'string', 'ReplicationSubnetGroup': { 'ReplicationSubnetGroupIdentifier': 'string', 'ReplicationSubnetGroupDescription': 'string', 'VpcId': 'string', 'SubnetGroupStatus': 'string', 'Subnets': [ { 'SubnetIdentifier': 'string', 'SubnetAvailabilityZone': { 'Name': 'string' }, 'SubnetStatus': 'string' }, ] }, 'PreferredMaintenanceWindow': 'string', 'PendingModifiedValues': { 'ReplicationInstanceClass': 'string', 'AllocatedStorage': 123, 'MultiAZ': True|False, 'EngineVersion': 'string' }, 'MultiAZ': True|False, 'EngineVersion': 'string', 'AutoMinorVersionUpgrade': True|False, 'KmsKeyId': 'string', 'ReplicationInstanceArn': 'string', 'ReplicationInstancePublicIpAddress': 'string', 'ReplicationInstancePrivateIpAddress': 'string', 'ReplicationInstancePublicIpAddresses': [ 'string', ], 'ReplicationInstancePrivateIpAddresses': [ 'string', ], 'PubliclyAccessible': True|False, 'SecondaryAvailabilityZone': 'string' } } :returns: Must contain from 1 to 63 alphanumeric characters or hyphens. First character must be a letter. Cannot end with a hyphen or contain two consecutive hyphens.
def main_loop(self, steps_per_epoch, starting_epoch, max_epoch): """ Run the main training loop. Args: steps_per_epoch, starting_epoch, max_epoch (int): """ with self.sess.as_default(): self.loop.config(steps_per_epoch, starting_epoch, max_epoch) self.loop.update_global_step() try: self._callbacks.before_train() # refresh global step (might have changed by callbacks) TODO ugly # what if gs is changed later? self.loop.update_global_step() for self.loop._epoch_num in range( self.loop.starting_epoch, self.loop.max_epoch + 1): logger.info("Start Epoch {} ...".format(self.loop.epoch_num)) self._callbacks.before_epoch() start_time = time.time() for self.loop._local_step in range(self.loop.steps_per_epoch): if self.hooked_sess.should_stop(): return self.run_step() # implemented by subclass self._callbacks.trigger_step() self._callbacks.after_epoch() logger.info("Epoch {} (global_step {}) finished, time:{}.".format( self.loop.epoch_num, self.loop.global_step, humanize_time_delta(time.time() - start_time))) # trigger epoch outside the timing region. self._callbacks.trigger_epoch() logger.info("Training has finished!") except (StopTraining, tf.errors.OutOfRangeError) as e: logger.info("Training was stopped by exception {}.".format(str(e))) except KeyboardInterrupt: logger.info("Detected Ctrl-C and exiting main loop.") raise finally: self._callbacks.after_train() self.hooked_sess.close()
Run the main training loop. Args: steps_per_epoch, starting_epoch, max_epoch (int):
def list_nodes(conn=None, call=None): ''' Return a list of the VMs that are on the provider ''' if call == 'action': raise SaltCloudSystemExit( 'The list_nodes function must be called with -f or --function.' ) if not conn: conn = get_conn() # pylint: disable=E0602 nodes = conn.list_nodes() ret = {} for node in nodes: ret[node.name] = { 'id': node.id, 'image': node.image, 'name': node.name, 'private_ips': node.private_ips, 'public_ips': node.public_ips, 'size': node.size, 'state': node_state(node.state) } return ret
Return a list of the VMs that are on the provider
def solution_violations(solution, events, slots): """Take a solution and return a list of violated constraints Parameters ---------- solution: list or tuple a schedule in solution form events : list or tuple of resources.Event instances slots : list or tuple of resources.Slot instances Returns ------- Generator of a list of strings indicating the nature of the violated constraints """ array = converter.solution_to_array(solution, events, slots) return array_violations(array, events, slots)
Take a solution and return a list of violated constraints Parameters ---------- solution: list or tuple a schedule in solution form events : list or tuple of resources.Event instances slots : list or tuple of resources.Slot instances Returns ------- Generator of a list of strings indicating the nature of the violated constraints
def get_args():
    ''' Parse CLI args '''
    parser = argparse.ArgumentParser(description='Process args')
    parser.add_argument(
        '-H', '--host',
        required=True,
        action='store',
        help='Remote host to connect to'
    )
    parser.add_argument(
        '-P', '--port',
        type=int,
        default=443,
        action='store',
        help='Port to connect on'
    )
    parser.add_argument(
        '-u', '--user',
        required=True,
        action='store',
        help='User name to use when connecting to host'
    )
    parser.add_argument(
        '-p', '--password',
        required=False,
        action='store',
        help='Password to use when connecting to host'
    )
    parser.add_argument(
        '-s', '--ssl',
        required=False,
        action='store_true',
        help='Use SSL'
    )
    parser.add_argument(
        '-k', '--skip-ssl-verification',
        required=False,
        default=False,
        action='store_true',
        help='Skip SSL certificate validation'
    )
    parser.add_argument(
        '-n', '--dryrun',
        required=False,
        action='store_true',
        default=False,
        help='Dry run. Don\'t annotate any VM'
    )
    parser.add_argument(
        '-v', '--verbose',
        action='store_true',
        default=False,
        help='Verbose output'
    )
    return parser.parse_args()
Parse CLI args
def _mirror_groups(self): """ Mirrors the user's LDAP groups in the Django database and updates the user's membership. """ target_group_names = frozenset(self._get_groups().get_group_names()) current_group_names = frozenset( self._user.groups.values_list("name", flat=True).iterator() ) # These were normalized to sets above. MIRROR_GROUPS_EXCEPT = self.settings.MIRROR_GROUPS_EXCEPT MIRROR_GROUPS = self.settings.MIRROR_GROUPS # If the settings are white- or black-listing groups, we'll update # target_group_names such that we won't modify the membership of groups # beyond our purview. if isinstance(MIRROR_GROUPS_EXCEPT, (set, frozenset)): target_group_names = (target_group_names - MIRROR_GROUPS_EXCEPT) | ( current_group_names & MIRROR_GROUPS_EXCEPT ) elif isinstance(MIRROR_GROUPS, (set, frozenset)): target_group_names = (target_group_names & MIRROR_GROUPS) | ( current_group_names - MIRROR_GROUPS ) if target_group_names != current_group_names: existing_groups = list( Group.objects.filter(name__in=target_group_names).iterator() ) existing_group_names = frozenset(group.name for group in existing_groups) new_groups = [ Group.objects.get_or_create(name=name)[0] for name in target_group_names if name not in existing_group_names ] self._user.groups.set(existing_groups + new_groups)
Mirrors the user's LDAP groups in the Django database and updates the user's membership.
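The white-/black-list adjustment above is just set algebra; here is a standalone illustration with made-up group names (no Django or LDAP required):

target = frozenset({'staff', 'ldap-admins', 'wiki'})   # groups from LDAP
current = frozenset({'staff', 'local-only'})           # groups in Django

# Black-list case: never touch membership of 'local-only'.
MIRROR_GROUPS_EXCEPT = frozenset({'local-only'})
adjusted = (target - MIRROR_GROUPS_EXCEPT) | (current & MIRROR_GROUPS_EXCEPT)
print(adjusted)  # {'staff', 'ldap-admins', 'wiki', 'local-only'}

# White-list case: only mirror membership of 'wiki'.
MIRROR_GROUPS = frozenset({'wiki'})
adjusted = (target & MIRROR_GROUPS) | (current - MIRROR_GROUPS)
print(adjusted)  # {'wiki', 'staff', 'local-only'}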
def query_item(self, key, abis):
    """Query items based on system call number or name."""
    try:
        key = int(key)
        field = 'number'
    except ValueError:
        try:
            key = int(key, 16)
            field = 'number'
        except ValueError:
            field = 'name'

    arg = and_(getattr(Item, field) == key,
               or_(*[Item.abi == abi for abi in abis]))
    return self.session.query(Item).filter(arg).all()
Query items based on system call number or name.
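The decimal/hex/name fallback at the top of the method can be exercised on its own; this is a small hedged sketch, independent of the ORM query:

def classify_key(key):
    # Decimal string -> lookup by number; hex string -> lookup by number;
    # anything else -> lookup by name.
    try:
        return int(key), 'number'
    except ValueError:
        try:
            return int(key, 16), 'number'
        except ValueError:
            return key, 'name'

print(classify_key('60'))     # (60, 'number')
print(classify_key('0x3c'))   # (60, 'number')
print(classify_key('exit'))   # ('exit', 'name')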
def _set_extensions(self): """ Sets common named extensions to private attributes and creates a list of critical extensions """ self._critical_extensions = set() for extension in self['single_extensions']: name = extension['extn_id'].native attribute_name = '_%s_value' % name if hasattr(self, attribute_name): setattr(self, attribute_name, extension['extn_value'].parsed) if extension['critical'].native: self._critical_extensions.add(name) self._processed_extensions = True
Sets common named extensions to private attributes and creates a list of critical extensions
def touch_member(config, dcs): ''' Rip-off of the ha.touch_member without inter-class dependencies ''' p = Postgresql(config['postgresql']) p.set_state('running') p.set_role('master') def restapi_connection_string(config): protocol = 'https' if config.get('certfile') else 'http' connect_address = config.get('connect_address') listen = config['listen'] return '{0}://{1}/patroni'.format(protocol, connect_address or listen) data = { 'conn_url': p.connection_string, 'api_url': restapi_connection_string(config['restapi']), 'state': p.state, 'role': p.role } return dcs.touch_member(data, permanent=True)
Rip-off of the ha.touch_member without inter-class dependencies
def _get_event_source_obj(awsclient, evt_source):
    """
    Given awsclient, event_source dictionary item
    create an event_source object of the appropriate event type
    to schedule this event, and return the object.
    """
    event_source_map = {
        'dynamodb': event_source.dynamodb_stream.DynamoDBStreamEventSource,
        'kinesis': event_source.kinesis.KinesisEventSource,
        's3': event_source.s3.S3EventSource,
        'sns': event_source.sns.SNSEventSource,
        'events': event_source.cloudwatch.CloudWatchEventSource,
        'cloudfront': event_source.cloudfront.CloudFrontEventSource,
        'cloudwatch_logs': event_source.cloudwatch_logs.CloudWatchLogsEventSource,
    }

    evt_type = _get_event_type(evt_source)
    event_source_func = event_source_map.get(evt_type, None)
    if not event_source_func:
        raise ValueError('Unknown event source: {0}'.format(
            evt_source['arn']))

    return event_source_func(awsclient, evt_source)
Given awsclient, event_source dictionary item create an event_source object of the appropriate event type to schedule this event, and return the object.
def run_analysis(self, argv): """Run this analysis""" args = self._parser.parse_args(argv) obs = BinnedAnalysis.BinnedObs(irfs=args.irfs, expCube=args.expcube, srcMaps=args.srcmaps, binnedExpMap=args.bexpmap) like = BinnedAnalysis.BinnedAnalysis(obs, optimizer='MINUIT', srcModel=GtMergeSrcmaps.NULL_MODEL, wmap=None) like.logLike.set_use_single_fixed_map(False) print("Reading xml model from %s" % args.srcmdl) source_factory = pyLike.SourceFactory(obs.observation) source_factory.readXml(args.srcmdl, BinnedAnalysis._funcFactory, False, True, True) strv = pyLike.StringVector() source_factory.fetchSrcNames(strv) source_names = [strv[i] for i in range(strv.size())] missing_sources = [] srcs_to_merge = [] for source_name in source_names: try: source = source_factory.releaseSource(source_name) # EAC, add the source directly to the model like.logLike.addSource(source) srcs_to_merge.append(source_name) except KeyError: missing_sources.append(source_name) comp = like.mergeSources(args.merged, source_names, 'ConstantValue') like.logLike.getSourceMap(comp.getName()) print("Merged %i sources into %s" % (len(srcs_to_merge), comp.getName())) if missing_sources: print("Missed sources: ", missing_sources) print("Writing output source map file %s" % args.outfile) like.logLike.saveSourceMaps(args.outfile, False, False) if args.gzip: os.system("gzip -9 %s" % args.outfile) print("Writing output xml file %s" % args.outxml) like.writeXml(args.outxml)
Run this analysis
def naturalsortkey(s): """Natural sort order""" return [int(part) if part.isdigit() else part for part in re.split('([0-9]+)', s)]
Natural sort order
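Typical use is as a `key=` argument to `sorted`, so embedded numbers sort numerically rather than lexicographically:

names = ['file10', 'file2', 'file1']
print(sorted(names))                       # ['file1', 'file10', 'file2']
print(sorted(names, key=naturalsortkey))   # ['file1', 'file2', 'file10']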
def view_surface_app_activity(self) -> list:
    '''Get package with activity of applications that are running in the foreground.'''
    output, error = self._execute(
        '-s', self.device_sn, 'shell', 'dumpsys', 'window', 'w')
    return re.findall(r"name=([a-zA-Z0-9\.]+/.[a-zA-Z0-9\.]+)", output)
Get package with activity of applications that are running in the foreground.
def get(self, path): # pylint: disable=W0221 """Renders a GET request, by showing this nodes stats and children.""" path = path or '' path = path.lstrip('/') parts = path.split('/') if not parts[0]: parts = parts[1:] statDict = util.lookup(scales.getStats(), parts) if statDict is None: self.set_status(404) self.finish('Path not found.') return outputFormat = self.get_argument('format', default='html') query = self.get_argument('query', default=None) if outputFormat == 'json': formats.jsonFormat(self, statDict, query) elif outputFormat == 'prettyjson': formats.jsonFormat(self, statDict, query, pretty=True) else: formats.htmlHeader(self, '/' + path, self.serverName, query) formats.htmlFormat(self, tuple(parts), statDict, query) return None
Renders a GET request, by showing this nodes stats and children.
def ip_allocate(self, public=False): """ Allocates a new :any:`IPAddress` for this Instance. Additional public IPs require justification, and you may need to open a :any:`SupportTicket` before you can add one. You may only have, at most, one private IP per Instance. :param public: If the new IP should be public or private. Defaults to private. :type public: bool :returns: The new IPAddress :rtype: IPAddress """ result = self._client.post( "{}/ips".format(Instance.api_endpoint), model=self, data={ "type": "ipv4", "public": public, }) if not 'address' in result: raise UnexpectedResponseError('Unexpected response allocating IP!', json=result) i = IPAddress(self._client, result['address'], result) return i
Allocates a new :any:`IPAddress` for this Instance. Additional public IPs require justification, and you may need to open a :any:`SupportTicket` before you can add one. You may only have, at most, one private IP per Instance. :param public: If the new IP should be public or private. Defaults to private. :type public: bool :returns: The new IPAddress :rtype: IPAddress
def iterator_chain(variables: VarType, parent: str = None) -> Iterable[VarMatrix]: """This successively appends each element of an array to a single list of values. This takes a list of values and puts all the values generated for each element in the list into a single list of values. It uses the :func:`itertools.chain` function to achieve this. This function is particularly useful for specifying multiple types of simulations with different parameters. Args: variables: The variables object parent: Unused """ logger.debug("Yielding from append iterator") if not isinstance(variables, list): raise ValueError( f"Append keyword only takes a list of arguments, got {variables} of type {type(variables)}" ) # Create a single list containing all the values yield list( chain.from_iterable( variable_matrix(item, parent, "product") for item in variables ) )
This successively appends each element of an array to a single list of values. This takes a list of values and puts all the values generated for each element in the list into a single list of values. It uses the :func:`itertools.chain` function to achieve this. This function is particularly useful for specifying multiple types of simulations with different parameters. Args: variables: The variables object parent: Unused
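The core of the function is `itertools.chain.from_iterable`; a minimal standalone illustration of that flattening step, with made-up variable lists standing in for what `variable_matrix` would yield:

from itertools import chain

# Each inner list plays the role of one element's generated values.
per_element_values = [
    [{'temperature': 300}, {'temperature': 350}],
    [{'pressure': 1.0}],
]

combined = list(chain.from_iterable(per_element_values))
print(combined)
# [{'temperature': 300}, {'temperature': 350}, {'pressure': 1.0}]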
def get_exe_path(cls): """ Return the full path to the executable. """ return os.path.abspath(os.path.join(ROOT, cls.bmds_version_dir, cls.exe + ".exe"))
Return the full path to the executable.
def update_vm(vm_ref, vm_config_spec): ''' Updates the virtual machine configuration with the given object vm_ref Virtual machine managed object reference vm_config_spec Virtual machine config spec object to update ''' vm_name = get_managed_object_name(vm_ref) log.trace('Updating vm \'%s\'', vm_name) try: task = vm_ref.ReconfigVM_Task(vm_config_spec) except vim.fault.NoPermission as exc: log.exception(exc) raise salt.exceptions.VMwareApiError( 'Not enough permissions. Required privilege: ' '{}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise salt.exceptions.VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise salt.exceptions.VMwareRuntimeError(exc.msg) vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task') return vm_ref
Updates the virtual machine configuration with the given object vm_ref Virtual machine managed object reference vm_config_spec Virtual machine config spec object to update
def get_subhash(hash): """Get a second hash based on napiprojekt's hash. :param str hash: napiprojekt's hash. :return: the subhash. :rtype: str """ idx = [0xe, 0x3, 0x6, 0x8, 0x2] mul = [2, 2, 5, 4, 3] add = [0, 0xd, 0x10, 0xb, 0x5] b = [] for i in range(len(idx)): a = add[i] m = mul[i] i = idx[i] t = a + int(hash[i], 16) v = int(hash[t:t + 2], 16) b.append(('%x' % (v * m))[-1]) return ''.join(b)
Get a second hash based on napiprojekt's hash. :param str hash: napiprojekt's hash. :return: the subhash. :rtype: str
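A quick usage sketch with a made-up 32-character hex digest (napiprojekt hashes are MD5 hex strings); the call only demonstrates the input/output shape:

fake_hash = '0123456789abcdef0123456789abcdef'  # illustrative only
sub = get_subhash(fake_hash)
print(len(sub))  # 5 -- one hex digit per entry in idx/mul/add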
def _apply_policy_config(policy_spec, policy_dict): '''Applies a policy dictionary to a policy spec''' log.trace('policy_dict = %s', policy_dict) if policy_dict.get('name'): policy_spec.name = policy_dict['name'] if policy_dict.get('description'): policy_spec.description = policy_dict['description'] if policy_dict.get('subprofiles'): # Incremental changes to subprofiles and capabilities are not # supported because they would complicate updates too much # The whole configuration of all sub-profiles is expected and applied policy_spec.constraints = pbm.profile.SubProfileCapabilityConstraints() subprofiles = [] for subprofile_dict in policy_dict['subprofiles']: subprofile_spec = \ pbm.profile.SubProfileCapabilityConstraints.SubProfile( name=subprofile_dict['name']) cap_specs = [] if subprofile_dict.get('force_provision'): subprofile_spec.forceProvision = \ subprofile_dict['force_provision'] for cap_dict in subprofile_dict['capabilities']: prop_inst_spec = pbm.capability.PropertyInstance( id=cap_dict['id'] ) setting_type = cap_dict['setting']['type'] if setting_type == 'set': prop_inst_spec.value = pbm.capability.types.DiscreteSet() prop_inst_spec.value.values = cap_dict['setting']['values'] elif setting_type == 'range': prop_inst_spec.value = pbm.capability.types.Range() prop_inst_spec.value.max = cap_dict['setting']['max'] prop_inst_spec.value.min = cap_dict['setting']['min'] elif setting_type == 'scalar': prop_inst_spec.value = cap_dict['setting']['value'] cap_spec = pbm.capability.CapabilityInstance( id=pbm.capability.CapabilityMetadata.UniqueId( id=cap_dict['id'], namespace=cap_dict['namespace']), constraint=[pbm.capability.ConstraintInstance( propertyInstance=[prop_inst_spec])]) cap_specs.append(cap_spec) subprofile_spec.capability = cap_specs subprofiles.append(subprofile_spec) policy_spec.constraints.subProfiles = subprofiles log.trace('updated policy_spec = %s', policy_spec) return policy_spec
Applies a policy dictionary to a policy spec
def best_model(self):
    """Rebuilds the top scoring model from an optimisation.

    Returns
    -------
    model: AMPAL
        Returns an AMPAL model of the top scoring parameters.

    Raises
    ------
    NameError:
        Raises a name error if the optimiser has not been run.
    """
    if hasattr(self, 'halloffame'):
        model = self._params['specification'](
            *self.parse_individual(self.halloffame[0]))
        model.pack_new_sequences(self._params['sequence'])
        return model
    else:
        raise NameError('No best model found, have you run the optimiser?')
Rebuilds the top scoring model from an optimisation. Returns ------- model: AMPAL Returns an AMPAL model of the top scoring parameters. Raises ------ NameError: Raises a name error if the optimiser has not been run.
def _calculate_values(self, tree, bar_d): """Calculate values for drawing bars of non-leafs in ``tree`` Recurses through ``tree``, replaces ``dict``s with ``(BarDescriptor, dict)`` so ``ProgressTree._draw`` can use the ``BarDescriptor``s to draw the tree """ if all([ isinstance(tree, dict), type(tree) != BarDescriptor ]): # Calculate value and max_value max_val = 0 value = 0 for k in tree: # Get descriptor by recursing bar_desc = self._calculate_values(tree[k], bar_d) # Reassign to tuple of (new descriptor, tree below) tree[k] = (bar_desc, tree[k]) value += bar_desc["value"].value max_val += bar_desc.get("kwargs", {}).get("max_value", 100) # Merge in values from ``bar_d`` before returning descriptor kwargs = merge_dicts( [bar_d.get("kwargs", {}), dict(max_value=max_val)], deepcopy=True ) ret_d = merge_dicts( [bar_d, dict(value=Value(floor(value)), kwargs=kwargs)], deepcopy=True ) return BarDescriptor(ret_d) elif isinstance(tree, BarDescriptor): return tree else: raise TypeError("Unexpected type {}".format(type(tree)))
Calculate values for drawing bars of non-leafs in ``tree`` Recurses through ``tree``, replaces ``dict``s with ``(BarDescriptor, dict)`` so ``ProgressTree._draw`` can use the ``BarDescriptor``s to draw the tree
def can_handle(self, data): r""" >>> e = Entry('http://www.github.com/?bar=foo&foobar', Entry.GET, (Response(b'<html/>'),)) >>> e.can_handle(b'GET /?bar=foo HTTP/1.1\r\nHost: github.com\r\nAccept-Encoding: gzip, deflate\r\nConnection: keep-alive\r\nUser-Agent: python-requests/2.7.0 CPython/3.4.3 Linux/3.19.0-16-generic\r\nAccept: */*\r\n\r\n') False >>> e = Entry('http://www.github.com/?bar=foo&foobar', Entry.GET, (Response(b'<html/>'),)) >>> e.can_handle(b'GET /?bar=foo&foobar HTTP/1.1\r\nHost: github.com\r\nAccept-Encoding: gzip, deflate\r\nConnection: keep-alive\r\nUser-Agent: python-requests/2.7.0 CPython/3.4.3 Linux/3.19.0-16-generic\r\nAccept: */*\r\n\r\n') True """ try: requestline, _ = decode_from_bytes(data).split(CRLF, 1) method, path, version = self._parse_requestline(requestline) except ValueError: try: return self == Mocket._last_entry except AttributeError: return False uri = urlsplit(path) can_handle = uri.path == self.path and method == self.method if self._match_querystring: kw = dict(keep_blank_values=True) can_handle = can_handle and parse_qs(uri.query, **kw) == parse_qs(self.query, **kw) if can_handle: Mocket._last_entry = self return can_handle
r""" >>> e = Entry('http://www.github.com/?bar=foo&foobar', Entry.GET, (Response(b'<html/>'),)) >>> e.can_handle(b'GET /?bar=foo HTTP/1.1\r\nHost: github.com\r\nAccept-Encoding: gzip, deflate\r\nConnection: keep-alive\r\nUser-Agent: python-requests/2.7.0 CPython/3.4.3 Linux/3.19.0-16-generic\r\nAccept: */*\r\n\r\n') False >>> e = Entry('http://www.github.com/?bar=foo&foobar', Entry.GET, (Response(b'<html/>'),)) >>> e.can_handle(b'GET /?bar=foo&foobar HTTP/1.1\r\nHost: github.com\r\nAccept-Encoding: gzip, deflate\r\nConnection: keep-alive\r\nUser-Agent: python-requests/2.7.0 CPython/3.4.3 Linux/3.19.0-16-generic\r\nAccept: */*\r\n\r\n') True
def fade(self, fade_in_len=0.0, fade_out_len=0.0, fade_shape='q'):
    '''Add a fade in and/or fade out to an audio file.
    Default fade shape is 1/4 sine wave.

    Parameters
    ----------
    fade_in_len : float, default=0.0
        Length of fade-in (seconds). If fade_in_len = 0,
        no fade in is applied.
    fade_out_len : float, default=0.0
        Length of fade-out (seconds). If fade_out_len = 0,
        no fade out is applied.
    fade_shape : str, default='q'
        Shape of fade. Must be one of
         * 'q' for quarter sine (default),
         * 'h' for half sine,
         * 't' for linear,
         * 'l' for logarithmic
         * 'p' for inverted parabola.

    See Also
    --------
    splice

    '''
    fade_shapes = ['q', 'h', 't', 'l', 'p']
    if fade_shape not in fade_shapes:
        raise ValueError(
            "Fade shape must be one of {}".format(" ".join(fade_shapes))
        )
    if not is_number(fade_in_len) or fade_in_len < 0:
        raise ValueError("fade_in_len must be a nonnegative number.")
    if not is_number(fade_out_len) or fade_out_len < 0:
        raise ValueError("fade_out_len must be a nonnegative number.")

    effect_args = []

    if fade_in_len > 0:
        effect_args.extend([
            'fade', '{}'.format(fade_shape), '{:f}'.format(fade_in_len)
        ])

    if fade_out_len > 0:
        effect_args.extend([
            'reverse', 'fade', '{}'.format(fade_shape),
            '{:f}'.format(fade_out_len), 'reverse'
        ])

    if len(effect_args) > 0:
        self.effects.extend(effect_args)
        self.effects_log.append('fade')

    return self
Add a fade in and/or fade out to an audio file.
Default fade shape is 1/4 sine wave.

Parameters
----------
fade_in_len : float, default=0.0
    Length of fade-in (seconds). If fade_in_len = 0,
    no fade in is applied.
fade_out_len : float, default=0.0
    Length of fade-out (seconds). If fade_out_len = 0,
    no fade out is applied.
fade_shape : str, default='q'
    Shape of fade. Must be one of
     * 'q' for quarter sine (default),
     * 'h' for half sine,
     * 't' for linear,
     * 'l' for logarithmic
     * 'p' for inverted parabola.

See Also
--------
splice
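For illustration, assuming this method lives on the usual transformer-style object (`tfm` below is hypothetical), a 1-second fade-in plus a 2-second fade-out appends these SoX arguments:

tfm.fade(fade_in_len=1.0, fade_out_len=2.0)
# tfm.effects now ends with:
# ['fade', 'q', '1.000000', 'reverse', 'fade', 'q', '2.000000', 'reverse']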
def get_align(text): "Return (halign, valign, angle) of the <text>." (x1, x2, h, v, a) = unaligned_get_dimension(text) return (h, v, a)
Return (halign, valign, angle) of the <text>.