def validate_value(self, value):
    """Validate value is an acceptable type during set_python operation"""
    if self.readonly:
        raise ValidationError(self.record, "Cannot set readonly field '{}'".format(self.name))
    if value not in (None, self._unset):
        if self.supported_types and not isinstance(value, tuple(self.supported_types)):
            raise ValidationError(self.record, "Field '{}' expects one of {}, got '{}' instead".format(
                self.name,
                ', '.join([repr(t.__name__) for t in self.supported_types]),
                type(value).__name__)
            )
def _GetNumberOfSeconds(self, fat_date_time):
    """Retrieves the number of seconds from a FAT date time.

    Args:
        fat_date_time (int): FAT date time.

    Returns:
        int: number of seconds since January 1, 1980 00:00:00.

    Raises:
        ValueError: if the month, day of month, hours, minutes or seconds
            value is out of bounds.
    """
    day_of_month = (fat_date_time & 0x1f)
    month = ((fat_date_time >> 5) & 0x0f)
    year = (fat_date_time >> 9) & 0x7f

    days_per_month = self._GetDaysPerMonth(year, month)
    if day_of_month < 1 or day_of_month > days_per_month:
        raise ValueError('Day of month value out of bounds.')

    number_of_days = self._GetDayOfYear(1980 + year, month, day_of_month)
    number_of_days -= 1
    for past_year in range(0, year):
        number_of_days += self._GetNumberOfDaysInYear(past_year)

    fat_date_time >>= 16

    seconds = (fat_date_time & 0x1f) * 2
    minutes = (fat_date_time >> 5) & 0x3f
    hours = (fat_date_time >> 11) & 0x1f

    if hours not in range(0, 24):
        raise ValueError('Hours value out of bounds.')

    if minutes not in range(0, 60):
        raise ValueError('Minutes value out of bounds.')

    if seconds not in range(0, 60):
        raise ValueError('Seconds value out of bounds.')

    number_of_seconds = (((hours * 60) + minutes) * 60) + seconds
    number_of_seconds += number_of_days * definitions.SECONDS_PER_DAY
    return number_of_seconds
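The same bit layout can be decoded without the class; a minimal standalone sketch (the sample value below is hypothetical, not from the original source):

# Low 16 bits hold the date (day 0-4, month 5-8, years-since-1980 9-15),
# high 16 bits hold the time (seconds/2 0-4, minutes 5-10, hours 11-15).
fat_date_time = (0x0001 << 16) | 0x0021   # 1980-01-01 00:00:02

day_of_month = fat_date_time & 0x1f            # 1
month = (fat_date_time >> 5) & 0x0f            # 1
year = (fat_date_time >> 9) & 0x7f             # 0 -> 1980
seconds = ((fat_date_time >> 16) & 0x1f) * 2   # 2
minutes = (fat_date_time >> 21) & 0x3f         # 0
hours = (fat_date_time >> 27) & 0x1f           # 0
print(year + 1980, month, day_of_month, hours, minutes, seconds)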
def render_metadata(**kwargs):
    """
    Unstrict template block for rendering metadata:

    <div class="metadata">
        <img class="metadata-logo" src="{service_logo}">
        <p class="metadata-name">{service_name}</p>
        <p class="metadata-timestamp">
            <a href="{timestamp_link}">{timestamp}</a>
        </p>
    </div>
    """
    html = '<div class="metadata">'

    service_logo = kwargs.get('service_logo', None)
    if service_logo:
        html += '<img class="metadata-logo" src="{}">'.format(service_logo)

    service_name = kwargs.get('service_name', None)
    if service_name:
        html += '<p class="metadata-name">{}</p>'.format(service_name)

    timestamp = kwargs.get('timestamp', None)
    if timestamp:
        html += '<p class="metadata-timestamp">'
        timestamp_link = kwargs.get('timestamp_link', None)
        if timestamp_link:
            html += '<a href="{timestamp_link}">{timestamp}</a>'.format(
                timestamp_link=timestamp_link,
                timestamp=timestamp
            )
        else:
            html += timestamp
        html += '</p>'

    html += '</div>'
    return html
def compute_consistency_score(returns_test, preds):
    """
    Compute Bayesian consistency score.

    Parameters
    ----------
    returns_test : pd.Series
        Observed cumulative returns.
    preds : numpy.array
        Multiple (simulated) cumulative returns.

    Returns
    -------
    Consistency score
        Score from 100 (returns_test perfectly on the median line of the
        Bayesian cone spanned by preds) to 0 (returns_test completely
        outside of Bayesian cone.)
    """
    returns_test_cum = cum_returns(returns_test, starting_value=1.)
    cum_preds = np.cumprod(preds + 1, 1)

    q = [sp.stats.percentileofscore(cum_preds[:, i],
                                    returns_test_cum.iloc[i],
                                    kind='weak')
         for i in range(len(returns_test_cum))]
    # normalize to be from 100 (perfect median line) to 0 (completely outside
    # of cone)
    return 100 - np.abs(50 - np.mean(q)) / .5
def start_end(data, num_start=250, num_end=100, full_output=False):
    """
    Gate out first and last events.

    Parameters
    ----------
    data : FCSData or numpy array
        NxD flow cytometry data where N is the number of events and D is
        the number of parameters (aka channels).
    num_start, num_end : int, optional
        Number of events to gate out from beginning and end of `data`.
        Ignored if less than 0.
    full_output : bool, optional
        Flag specifying to return additional outputs. If true, the outputs
        are given as a namedtuple.

    Returns
    -------
    gated_data : FCSData or numpy array
        Gated flow cytometry data of the same format as `data`.
    mask : numpy array of bool, only if ``full_output==True``
        Boolean gate mask used to gate data such that
        ``gated_data = data[mask]``.

    Raises
    ------
    ValueError
        If the number of events to discard is greater than the total
        number of events in `data`.
    """
    if num_start < 0:
        num_start = 0
    if num_end < 0:
        num_end = 0
    if data.shape[0] < (num_start + num_end):
        raise ValueError('Number of events to discard greater than total' +
                         ' number of events.')

    mask = np.ones(shape=data.shape[0], dtype=bool)
    mask[:num_start] = False
    if num_end > 0:
        # catch the edge case where `num_end=0` causes mask[-num_end:] to mask
        # off all events
        mask[-num_end:] = False
    gated_data = data[mask]

    if full_output:
        StartEndGateOutput = collections.namedtuple(
            'StartEndGateOutput',
            ['gated_data', 'mask'])
        return StartEndGateOutput(gated_data=gated_data, mask=mask)
    else:
        return gated_data
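A minimal usage sketch (not part of the original source), assuming NumPy is imported and the `start_end` above is in scope:

import numpy as np

data = np.random.rand(1000, 3)          # 1000 events x 3 channels of synthetic data

gated = start_end(data)                 # drop first 250 and last 100 events (defaults)
print(gated.shape)                      # (650, 3)

gated, mask = start_end(data, num_start=10, num_end=5, full_output=True)
print(mask.sum())                       # 985 events kept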
def assoc(self, sitecol, assoc_dist, mode):
    """
    :param sitecol: a (filtered) site collection
    :param assoc_dist: the maximum distance for association
    :param mode: 'strict', 'warn' or 'filter'
    :returns: filtered site collection, filtered objects, discarded
    """
    assert mode in 'strict warn filter', mode
    dic = {}
    discarded = []
    for sid, lon, lat in zip(sitecol.sids, sitecol.lons, sitecol.lats):
        obj, distance = self.get_closest(lon, lat)
        if assoc_dist is None:
            dic[sid] = obj  # associate all
        elif distance <= assoc_dist:
            dic[sid] = obj  # associate within
        elif mode == 'warn':
            dic[sid] = obj  # associate outside
            logging.warning(
                'The closest vs30 site (%.1f %.1f) is distant more than %d'
                ' km from site #%d (%.1f %.1f)', obj['lon'], obj['lat'],
                int(distance), sid, lon, lat)
        elif mode == 'filter':
            discarded.append(obj)
        elif mode == 'strict':
            raise SiteAssociationError(
                'There is nothing closer than %s km '
                'to site (%s %s)' % (assoc_dist, lon, lat))
    if not dic:
        raise SiteAssociationError(
            'No sites could be associated within %s km' % assoc_dist)
    return (sitecol.filtered(dic),
            numpy.array([dic[sid] for sid in sorted(dic)]),
            discarded)
def put(self, robj, w=None, dw=None, pw=None, return_body=None,
        if_none_match=None, timeout=None):
    """
    Stores an object.
    """
    raise NotImplementedError
def get_blueprint(service_brokers: Union[List[ServiceBroker], ServiceBroker],
                  broker_credentials: Union[None, List[BrokerCredentials], BrokerCredentials],
                  logger: logging.Logger) -> Blueprint:
    """
    Returns the blueprint with service broker api.

    :param service_brokers: Services that this broker exposes
    :param broker_credentials: Optional Usernames and passwords that will be required
            to communicate with service broker
    :param logger: Used for api logs. This will not influence Flasks logging behavior.
    :return: Blueprint to register with Flask app instance
    """
    openbroker = Blueprint('open_broker', __name__)
    service_brokers = ensure_list(service_brokers)

    # Apply filters
    logger.debug("Apply print_request filter for debugging")
    openbroker.before_request(print_request)

    if DISABLE_VERSION_CHECK:
        logger.warning(
            "Minimum API version is not checked, this can cause illegal contracts between service broker and platform!"
        )
    else:
        logger.debug("Apply check_version filter for version %s" % str(MIN_VERSION))
        openbroker.before_request(check_version)

    logger.debug("Apply check_originating_identity filter")
    openbroker.before_request(check_originating_identity)

    if broker_credentials is not None:
        broker_credentials = ensure_list(broker_credentials)
        logger.debug("Apply check_auth filter with {} credentials".format(len(broker_credentials)))
        openbroker.before_request(get_auth_filter(broker_credentials))

    def get_broker_by_id(service_id: str):
        for service in service_brokers:
            if service.service_id() == service_id:
                return service
        raise KeyError('Service {} not found'.format(service_id))

    def add_service_id_to_async_response(response, service_id: str):
        if response.is_async:
            if response.operation is None:
                response.operation = service_id
            else:
                response.operation = ' '.join((service_id, response.operation))

    def extract_authorization_username(request: Request):
        if request.authorization is not None:
            return request.authorization.username
        else:
            return None

    @openbroker.errorhandler(Exception)
    def error_handler(e):
        logger.exception(e)
        return to_json_response(ErrorResponse(
            description=str(e)
        )), HTTPStatus.INTERNAL_SERVER_ERROR

    @openbroker.errorhandler(NotImplementedError)
    def error_handler(e):
        logger.exception(e)
        return to_json_response(ErrorResponse(
            description=str(e)
        )), HTTPStatus.NOT_IMPLEMENTED

    @openbroker.route("/v2/catalog", methods=['GET'])
    def catalog():
        """
        :return: Catalog of broker (List of services)
        """
        return to_json_response(CatalogResponse(list(s.catalog() for s in service_brokers)))

    @openbroker.route("/v2/service_instances/<instance_id>", methods=['PUT'])
    @requires_application_json
    def provision(instance_id):
        try:
            accepts_incomplete = 'true' == request.args.get("accepts_incomplete", 'false')

            provision_details = ProvisionDetails(**json.loads(request.data))
            provision_details.originating_identity = request.originating_identity
            provision_details.authorization_username = extract_authorization_username(request)

            broker = get_broker_by_id(provision_details.service_id)
            if not broker.check_plan_id(provision_details.plan_id):
                raise TypeError('plan_id not found in this service.')
        except (TypeError, KeyError, JSONDecodeError) as e:
            logger.exception(e)
            return to_json_response(ErrorResponse(description=str(e))), HTTPStatus.BAD_REQUEST

        try:
            result = broker.provision(instance_id, provision_details, accepts_incomplete)
            add_service_id_to_async_response(result, broker.service_id())
        except errors.ErrInstanceAlreadyExists as e:
            logger.exception(e)
            return to_json_response(EmptyResponse()), HTTPStatus.CONFLICT
        except errors.ErrInvalidParameters as e:
            return to_json_response(ErrorResponse('InvalidParameters', str(e))), HTTPStatus.BAD_REQUEST
        except errors.ErrAsyncRequired as e:
            logger.exception(e)
            return to_json_response(ErrorResponse(
                error="AsyncRequired",
                description="This service plan requires client support for asynchronous service operations."
            )), HTTPStatus.UNPROCESSABLE_ENTITY

        if result.state == ProvisionState.IS_ASYNC:
            return to_json_response(ProvisioningResponse(result.dashboard_url, result.operation)), HTTPStatus.ACCEPTED
        elif result.state == ProvisionState.IDENTICAL_ALREADY_EXISTS:
            return to_json_response(ProvisioningResponse(result.dashboard_url, result.operation)), HTTPStatus.OK
        elif result.state == ProvisionState.SUCCESSFUL_CREATED:
            return to_json_response(ProvisioningResponse(result.dashboard_url, result.operation)), HTTPStatus.CREATED
        else:
            raise errors.ServiceException('IllegalState, ProvisioningState unknown.')

    @openbroker.route("/v2/service_instances/<instance_id>", methods=['PATCH'])
    @requires_application_json
    def update(instance_id):
        try:
            accepts_incomplete = 'true' == request.args.get("accepts_incomplete", 'false')

            update_details = UpdateDetails(**json.loads(request.data))
            update_details.originating_identity = request.originating_identity
            update_details.authorization_username = extract_authorization_username(request)

            broker = get_broker_by_id(update_details.service_id)
            if not broker.check_plan_id(update_details.plan_id):
                raise TypeError('plan_id not found in this service.')
        except (TypeError, KeyError, JSONDecodeError) as e:
            logger.exception(e)
            return to_json_response(ErrorResponse(description=str(e))), HTTPStatus.BAD_REQUEST

        try:
            result = broker.update(instance_id, update_details, accepts_incomplete)
            add_service_id_to_async_response(result, broker.service_id())
        except errors.ErrInvalidParameters as e:
            return to_json_response(ErrorResponse('InvalidParameters', str(e))), HTTPStatus.BAD_REQUEST
        except errors.ErrAsyncRequired as e:
            logger.exception(e)
            return to_json_response(ErrorResponse(
                error="AsyncRequired",
                description="This service plan requires client support for asynchronous service operations."
            )), HTTPStatus.UNPROCESSABLE_ENTITY

        if result.is_async:
            return to_json_response(UpdateResponse(result.operation, result.dashboard_url)), HTTPStatus.ACCEPTED
        else:
            return to_json_response(UpdateResponse(None, result.dashboard_url)), HTTPStatus.OK

    @openbroker.route("/v2/service_instances/<instance_id>/service_bindings/<binding_id>", methods=['PUT'])
    @requires_application_json
    def bind(instance_id, binding_id):
        try:
            binding_details = BindDetails(**json.loads(request.data))
            binding_details.originating_identity = request.originating_identity
            binding_details.authorization_username = extract_authorization_username(request)

            broker = get_broker_by_id(binding_details.service_id)
            if not broker.check_plan_id(binding_details.plan_id):
                raise TypeError('plan_id not found in this service.')
        except (TypeError, KeyError, JSONDecodeError) as e:
            logger.exception(e)
            return to_json_response(ErrorResponse(description=str(e))), HTTPStatus.BAD_REQUEST

        try:
            result = broker.bind(instance_id, binding_id, binding_details)
        except errors.ErrBindingAlreadyExists as e:
            logger.exception(e)
            return to_json_response(EmptyResponse()), HTTPStatus.CONFLICT
        except errors.ErrAppGuidNotProvided as e:
            logger.exception(e)
            return to_json_response(ErrorResponse(
                error="RequiresApp",
                description="This service supports generation of credentials through binding an application only."
            )), HTTPStatus.UNPROCESSABLE_ENTITY

        response = BindResponse(
            credentials=result.credentials,
            syslog_drain_url=result.syslog_drain_url,
            route_service_url=result.route_service_url,
            volume_mounts=result.volume_mounts
        )
        if result.state == BindState.SUCCESSFUL_BOUND:
            return to_json_response(response), HTTPStatus.CREATED
        elif result.state == BindState.IDENTICAL_ALREADY_EXISTS:
            return to_json_response(response), HTTPStatus.OK
        else:
            raise errors.ServiceException('IllegalState, BindState unknown.')

    @openbroker.route("/v2/service_instances/<instance_id>/service_bindings/<binding_id>", methods=['DELETE'])
    def unbind(instance_id, binding_id):
        try:
            plan_id = request.args["plan_id"]
            service_id = request.args["service_id"]
            unbind_details = UnbindDetails(plan_id, service_id)
            unbind_details.originating_identity = request.originating_identity
            unbind_details.authorization_username = extract_authorization_username(request)

            broker = get_broker_by_id(unbind_details.service_id)
            if not broker.check_plan_id(unbind_details.plan_id):
                raise TypeError('plan_id not found in this service.')
        except (TypeError, KeyError) as e:
            logger.exception(e)
            return to_json_response(ErrorResponse(description=str(e))), HTTPStatus.BAD_REQUEST

        try:
            broker.unbind(instance_id, binding_id, unbind_details)
        except errors.ErrBindingDoesNotExist as e:
            logger.exception(e)
            return to_json_response(EmptyResponse()), HTTPStatus.GONE

        return to_json_response(EmptyResponse()), HTTPStatus.OK

    @openbroker.route("/v2/service_instances/<instance_id>", methods=['DELETE'])
    def deprovision(instance_id):
        try:
            plan_id = request.args["plan_id"]
            service_id = request.args["service_id"]
            accepts_incomplete = 'true' == request.args.get("accepts_incomplete", 'false')

            deprovision_details = DeprovisionDetails(plan_id, service_id)
            deprovision_details.originating_identity = request.originating_identity
            deprovision_details.authorization_username = extract_authorization_username(request)

            broker = get_broker_by_id(deprovision_details.service_id)
            if not broker.check_plan_id(deprovision_details.plan_id):
                raise TypeError('plan_id not found in this service.')
        except (TypeError, KeyError) as e:
            logger.exception(e)
            return to_json_response(ErrorResponse(description=str(e))), HTTPStatus.BAD_REQUEST

        try:
            result = broker.deprovision(instance_id, deprovision_details, accepts_incomplete)
            add_service_id_to_async_response(result, broker.service_id())
        except errors.ErrInstanceDoesNotExist as e:
            logger.exception(e)
            return to_json_response(EmptyResponse()), HTTPStatus.GONE
        except errors.ErrAsyncRequired as e:
            logger.exception(e)
            return to_json_response(ErrorResponse(
                error="AsyncRequired",
                description="This service plan requires client support for asynchronous service operations."
            )), HTTPStatus.UNPROCESSABLE_ENTITY

        if result.is_async:
            return to_json_response(DeprovisionResponse(result.operation)), HTTPStatus.ACCEPTED
        else:
            return to_json_response(EmptyResponse()), HTTPStatus.OK

    @openbroker.route("/v2/service_instances/<instance_id>/last_operation", methods=['GET'])
    def last_operation(instance_id):
        # Not required
        # service_id = request.args.get("service_id", None)
        # plan_id = request.args.get("plan_id", None)

        operation_data = request.args.get("operation", None)
        data = operation_data.split(' ', maxsplit=1)
        service_id = data[0]
        if len(data) == 2:
            operation_data = data[1]
        else:
            operation_data = None

        try:
            broker = get_broker_by_id(service_id)
        except KeyError as e:
            logger.exception(e)
            return to_json_response(ErrorResponse(description=str(e))), HTTPStatus.BAD_REQUEST

        result = broker.last_operation(instance_id, operation_data)

        return to_json_response(LastOperationResponse(result.state, result.description)), HTTPStatus.OK

    return openbroker
def findall(self, string, pos=0, endpos=sys.maxsize):
    """Return a list of all non-overlapping matches of pattern in string."""
    matchlist = []
    state = _State(string, pos, endpos, self.flags)
    while state.start <= state.end:
        state.reset()
        state.string_position = state.start
        if not state.search(self._code):
            break
        match = SRE_Match(self, state)
        if self.groups == 0 or self.groups == 1:
            item = match.group(self.groups)
        else:
            item = match.groups("")
        matchlist.append(item)
        if state.string_position == state.start:
            state.start += 1
        else:
            state.start = state.string_position
    return matchlist
def rapidfire(self, max_nlaunch=-1, max_loops=1, sleep_time=5):
    """
    Keeps submitting `Tasks` until we are out of jobs or no job is ready to run.

    Args:
        max_nlaunch: Maximum number of launches. default: no limit.
        max_loops: Maximum number of loops
        sleep_time: seconds to sleep between rapidfire loop iterations

    Returns:
        The number of tasks launched.
    """
    num_launched, do_exit, launched = 0, False, []

    for count in range(max_loops):
        if do_exit:
            break
        if count > 0:
            time.sleep(sleep_time)

        tasks = self.fetch_tasks_to_run()

        # I don't know why but we receive duplicated tasks.
        if any(task in launched for task in tasks):
            logger.critical("numtasks %d already in launched list:\n%s" % (len(tasks), launched))

        # Preventive test.
        tasks = [t for t in tasks if t not in launched]

        if not tasks:
            continue

        for task in tasks:
            fired = task.start()
            if fired:
                launched.append(task)
                num_launched += 1

            if num_launched >= max_nlaunch > 0:
                logger.info('num_launched >= max_nlaunch, going back to sleep')
                do_exit = True
                break

    # Update the database.
    self.flow.pickle_dump()

    return num_launched
def Fsphere(q, R):
    """Scattering form-factor amplitude of a sphere normalized to F(q=0)=V

    Inputs:
    -------
        ``q``: independent variable
        ``R``: sphere radius

    Formula:
    --------
        ``4*pi/q^3 * (sin(qR) - qR*cos(qR))``
    """
    return 4 * np.pi / q ** 3 * (np.sin(q * R) - q * R * np.cos(q * R))
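A quick usage sketch (not part of the original source), assuming NumPy is imported and the `Fsphere` above is in scope; as q approaches 0 the amplitude tends to the sphere volume 4/3*pi*R^3:

import numpy as np

R = 5.0                                   # sphere radius, arbitrary units
q = np.array([1e-3, 0.1, 1.0])
F = Fsphere(q, R)

# The small-q value matches the sphere volume to within rounding.
print(F[0], 4 / 3 * np.pi * R**3)         # both approximately 523.6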
def find_checker(func: CallableT) -> Optional[CallableT]:
    """Iterate through the decorator stack till we find the contract checker."""
    contract_checker = None  # type: Optional[CallableT]
    for a_wrapper in _walk_decorator_stack(func):
        if hasattr(a_wrapper, "__preconditions__") or hasattr(a_wrapper, "__postconditions__"):
            contract_checker = a_wrapper
    return contract_checker
def GetBoundingRectangles(self) -> list:
    """
    Call IUIAutomationTextRange::GetBoundingRectangles.
    textAttributeId: int, a value in class `TextAttributeId`.
    Return list, a list of `Rect`, bounding rectangles for each fully or partially
        visible line of text in a text range.
    Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextrange-getboundingrectangles

    for rect in textRange.GetBoundingRectangles():
        print(rect.left, rect.top, rect.right, rect.bottom, rect.width(), rect.height(), rect.xcenter(), rect.ycenter())
    """
    floats = self.textRange.GetBoundingRectangles()
    rects = []
    for i in range(len(floats) // 4):
        rect = Rect(int(floats[i * 4]), int(floats[i * 4 + 1]),
                    int(floats[i * 4]) + int(floats[i * 4 + 2]),
                    int(floats[i * 4 + 1]) + int(floats[i * 4 + 3]))
        rects.append(rect)
    return rects
def backend(entry):
    """
    Default URL shortener backend for Zinnia.
    """
    return '%s://%s%s' % (
        PROTOCOL, Site.objects.get_current().domain,
        reverse('zinnia:entry_shortlink', args=[base36(entry.pk)]))
def check(self, instance):
    """ Collect metrics for the given gunicorn instance. """
    self.log.debug("Running instance: %s", instance)

    custom_tags = instance.get('tags', [])

    # Validate the config.
    if not instance or self.PROC_NAME not in instance:
        raise GUnicornCheckError("instance must specify: %s" % self.PROC_NAME)

    # Load the gunicorn master procedure.
    proc_name = instance.get(self.PROC_NAME)
    master_procs = self._get_master_proc_by_name(proc_name, custom_tags)

    # Fetch the worker procs and count their states.
    worker_procs = self._get_workers_from_procs(master_procs)
    working, idle = self._count_workers(worker_procs)

    # if no workers are running, alert CRITICAL, otherwise OK
    msg = "%s working and %s idle workers for %s" % (working, idle, proc_name)
    status = AgentCheck.CRITICAL if working == 0 and idle == 0 else AgentCheck.OK

    tags = ['app:' + proc_name] + custom_tags
    self.service_check(self.SVC_NAME, status, tags=tags, message=msg)

    # Submit the data.
    self.log.debug("instance %s procs - working:%s idle:%s" % (proc_name, working, idle))
    self.gauge("gunicorn.workers", working, tags + self.WORKING_TAGS)
    self.gauge("gunicorn.workers", idle, tags + self.IDLE_TAGS)
async def delete(self, query, *, dc=None):
    """Delete existing prepared query

    Parameters:
        query (ObjectID): Query ID
        dc (str): Specify datacenter that will be used.
            Defaults to the agent's local datacenter.
    Results:
        bool: ``True`` on success
    """
    query_id = extract_attr(query, keys=["ID"])
    response = await self._api.delete("/v1/query", query_id, params={"dc": dc})
    return response.status == 200
def _iter_info(self, niter, level=logging.INFO):
    """
    Log iteration number and mismatch

    Parameters
    ----------
    level
        logging level

    Returns
    -------
    None
    """
    max_mis = self.iter_mis[niter - 1]
    msg = ' Iter {:<d}.  max mismatch = {:8.7f}'.format(niter, max_mis)
    logger.info(msg)
def remove(self, state_element, recursive=True, force=False, destroy=True):
    """Remove item from state

    :param StateElement state_element: State or state element to be removed
    :param bool recursive: Only applies to removal of state and decides whether the removal should be called
        recursively on all child states
    :param bool force: if the removal should be forced without checking constraints
    :param bool destroy: a flag that signals that the state element will be fully removed and disassembled
    """
    if isinstance(state_element, State):
        return self.remove_state(state_element.state_id, recursive=recursive, force=force, destroy=destroy)
    elif isinstance(state_element, Transition):
        return self.remove_transition(state_element.transition_id, destroy=destroy)
    elif isinstance(state_element, DataFlow):
        return self.remove_data_flow(state_element.data_flow_id, destroy=destroy)
    elif isinstance(state_element, ScopedVariable):
        return self.remove_scoped_variable(state_element.data_port_id, destroy=destroy)
    else:
        super(ContainerState, self).remove(state_element, force=force, destroy=destroy)
def xml_to_region(xmlstr):
    '''Converts xml response to service bus region

    The xml format for region:
    <entry>
        <id>uuid:157c311f-081f-4b4a-a0ba-a8f990ffd2a3;id=1756759</id>
        <title type="text"></title>
        <updated>2013-04-10T18:25:29Z</updated>
        <content type="application/xml">
            <RegionCodeDescription
                xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect"
                xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
                <Code>East Asia</Code>
                <FullName>East Asia</FullName>
            </RegionCodeDescription>
        </content>
    </entry>
    '''
    xmldoc = minidom.parseString(xmlstr)
    region = ServiceBusRegion()

    for desc in _MinidomXmlToObject.get_children_from_path(
            xmldoc, 'entry', 'content', 'RegionCodeDescription'):
        node_value = _MinidomXmlToObject.get_first_child_node_value(desc, 'Code')
        if node_value is not None:
            region.code = node_value
        node_value = _MinidomXmlToObject.get_first_child_node_value(desc, 'FullName')
        if node_value is not None:
            region.fullname = node_value

    return region
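For reference, the docstring's sample XML can also be picked apart with plain `minidom`, without the Azure helper classes; a standalone sketch (not part of the original source):

from xml.dom import minidom

xmlstr = '''<entry>
  <content type="application/xml">
    <RegionCodeDescription xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect">
      <Code>East Asia</Code>
      <FullName>East Asia</FullName>
    </RegionCodeDescription>
  </content>
</entry>'''

doc = minidom.parseString(xmlstr)
desc = doc.getElementsByTagName('RegionCodeDescription')[0]
code = desc.getElementsByTagName('Code')[0].firstChild.nodeValue
fullname = desc.getElementsByTagName('FullName')[0].firstChild.nodeValue
print(code, fullname)   # East Asia East Asia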
def f_set(self, *args, **kwargs):
    """Sets annotations

    Items in args are added as `annotation` and `annotation_X` where
    'X' is the position in args for following arguments.
    """
    for idx, arg in enumerate(args):
        valstr = self._translate_key(idx)
        self.f_set_single(valstr, arg)

    for key, arg in kwargs.items():
        self.f_set_single(key, arg)
def read_file(self, file_name, section=None):
    """Read settings from specified ``section`` of config file."""
    file_name, section = self.parse_file_name_and_section(file_name, section)
    if not os.path.isfile(file_name):
        raise SettingsFileNotFoundError(file_name)

    parser = self.make_parser()
    with open(file_name) as fp:
        parser.read_file(fp)

    settings = OrderedDict()

    if parser.has_section(section):
        section_dict = parser[section]
        self.section_found_while_reading = True
    else:
        section_dict = parser.defaults().copy()

    extends = section_dict.get('extends')
    if extends:
        extends = self.decode_value(extends)
        extends, extends_section = self.parse_file_name_and_section(
            extends, extender=file_name, extender_section=section)
        settings.update(self.read_file(extends, extends_section))

    settings.update(section_dict)

    if not self.section_found_while_reading:
        raise SettingsFileSectionNotFoundError(section)

    return settings
def get_host_mac(name=None, allow_array=False, **api_opts):
    '''
    Get mac address from host record.

    Use `allow_array` to return possible multiple values.

    CLI Example:

    .. code-block:: bash

        salt-call infoblox.get_host_mac host=localhost.domain.com
    '''
    data = get_host(name=name, **api_opts)
    if data and 'ipv4addrs' in data:
        l = []
        for a in data['ipv4addrs']:
            if 'mac' in a:
                l.append(a['mac'])
        if allow_array:
            return l
        if l:
            return l[0]
    return None
def format(self):
    """Format name for output.

    :return: Formatted name representation.
    """
    name = self._primary.value[0]
    if self.surname:
        if name:
            name += ' '
        name += self.surname
    if self._primary.value[2]:
        if name:
            name += ' '
        name += self._primary.value[2]
    return name
def __convert_to_df(a, val_col=None, group_col=None, val_id=None, group_id=None):
    '''Hidden helper method to create a DataFrame with input data for further processing.

    Parameters
    ----------
    a : array_like or pandas DataFrame object
        An array, any object exposing the array interface or a pandas DataFrame.
        Array must be two-dimensional. Second dimension may vary, i.e. groups
        may have different lengths.

    val_col : str, optional
        Name of a DataFrame column that contains dependent variable values (test
        or response variable). Values should have a non-nominal scale. Must be
        specified if `a` is a pandas DataFrame object.

    group_col : str, optional
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable). Values should have a nominal scale
        (categorical). Must be specified if `a` is a pandas DataFrame object.

    val_id : int, optional
        Index of a column that contains dependent variable values (test or
        response variable). Should be specified if a NumPy ndarray is used as an
        input. It will be inferred from data, if not specified.

    group_id : int, optional
        Index of a column that contains independent variable values (grouping or
        predictor variable). Should be specified if a NumPy ndarray is used as an
        input. It will be inferred from data, if not specified.

    Returns
    -------
    x : pandas DataFrame
        DataFrame with input data, `val_col` column contains numerical values and
        `group_col` column contains categorical values.

    val_col : str
        Name of a DataFrame column that contains dependent variable values (test
        or response variable).

    group_col : str
        Name of a DataFrame column that contains independent variable values
        (grouping or predictor variable).

    Notes
    -----
    Inferrence algorithm for determining `val_id` and `group_id` args is rather
    simple, so it is better to specify them explicitly to prevent errors.
    '''
    if not group_col:
        group_col = 'groups'
    if not val_col:
        val_col = 'vals'

    if isinstance(a, DataFrame):
        x = a.copy()
        if not {group_col, val_col}.issubset(a.columns):
            raise ValueError('Specify correct column names using `group_col` and `val_col` args')
        return x, val_col, group_col

    elif isinstance(a, list) or (isinstance(a, np.ndarray) and not a.shape.count(2)):
        grps_len = map(len, a)
        grps = list(it.chain(*[[i + 1] * l for i, l in enumerate(grps_len)]))
        vals = list(it.chain(*a))
        return DataFrame({val_col: vals, group_col: grps}), val_col, group_col

    elif isinstance(a, np.ndarray):
        # cols ids not defined
        # trying to infer
        if not all([val_id, group_id]):
            if np.argmax(a.shape):
                a = a.T
            ax = [np.unique(a[:, 0]).size, np.unique(a[:, 1]).size]

            if np.diff(ax).item():
                __val_col = np.argmax(ax)
                __group_col = np.argmin(ax)
            else:
                raise ValueError('Cannot infer input format.\nPlease specify `val_id` and `group_id` args')

            cols = {__val_col: val_col,
                    __group_col: group_col}
        else:
            cols = {val_id: val_col,
                    group_id: group_col}

        cols_vals = dict(sorted(cols.items())).values()
        return DataFrame(a, columns=cols_vals), val_col, group_col
def remove_prefix(self, id):
    """ Remove a prefix.
    """
    try:
        p = Prefix.get(int(id))
        p.remove()
    except NipapError as e:
        return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})

    return json.dumps(p, cls=NipapJSONEncoder)
def setPage(self, pageId, page):
    """
    Sets the page and id for the given page vs. auto-constructing it.  This will
    allow the developer to create a custom order for IDs.

    :param      pageId | <int>
                page   | <projexui.widgets.xoverlaywizard.XOverlayWizardPage>
    """
    page.setParent(self)

    if self.property("useShadow") is not False:
        # create the drop shadow effect
        effect = QtGui.QGraphicsDropShadowEffect(page)
        effect.setColor(QtGui.QColor('black'))
        effect.setBlurRadius(50)
        effect.setOffset(0, 0)
        page.setGraphicsEffect(effect)

    self._pages[pageId] = page

    if self._startId == -1:
        self._startId = pageId
def _processFailedSuccessors(self, jobGraph):
    """Some of the job's successors failed; either fail the job or restart it if it
    has retries left and is a checkpoint job."""
    if jobGraph.jobStoreID in self.toilState.servicesIssued:
        # The job has services running, signal for them to be killed
        # once they are killed then the jobGraph will be re-added to
        # the updatedJobs set and then scheduled to be removed
        logger.debug("Telling job: %s to terminate its services due to successor failure",
                     jobGraph.jobStoreID)
        self.serviceManager.killServices(self.toilState.servicesIssued[jobGraph.jobStoreID],
                                         error=True)
    elif jobGraph.jobStoreID in self.toilState.successorCounts:
        # The job has non-service jobs running wait for them to finish
        # the job will be re-added to the updated jobs when these jobs
        # are done
        logger.debug("Job %s with ID: %s with failed successors still has successor jobs running",
                     jobGraph, jobGraph.jobStoreID)
    elif jobGraph.checkpoint is not None and jobGraph.remainingRetryCount > 1:
        # If the job is a checkpoint and has remaining retries then reissue it.
        # The logic behind using > 1 rather than > 0 here: Since this job has
        # been tried once (without decreasing its retry count as the job
        # itself was successful), and its subtree failed, it shouldn't be retried
        # unless it has more than 1 try.
        logger.warn('Job: %s is being restarted as a checkpoint after the total '
                    'failure of jobs in its subtree.', jobGraph.jobStoreID)
        self.issueJob(JobNode.fromJobGraph(jobGraph))
    else:
        # Mark it totally failed
        logger.debug("Job %s is being processed as completely failed", jobGraph.jobStoreID)
        self.processTotallyFailedJob(jobGraph)
def save(self, dolist=0):
    """Return .par format string for this parameter

    If dolist is set, returns fields as a list of strings.  Default
    is to return a single string appropriate for writing to a file.
    """
    quoted = not dolist
    fields = 7 * [""]
    fields[0] = self.name
    fields[1] = self.type
    fields[2] = self.mode
    fields[3] = self.toString(self.value, quoted=quoted)
    if self.choice is not None:
        schoice = list(map(self.toString, self.choice))
        schoice.insert(0, '')
        schoice.append('')
        fields[4] = repr('|'.join(schoice))
    elif self.min not in [None, INDEF]:
        fields[4] = self.toString(self.min, quoted=quoted)
    if self.max not in [None, INDEF]:
        fields[5] = self.toString(self.max, quoted=quoted)
    if self.prompt:
        if quoted:
            sprompt = repr(self.prompt)
        else:
            sprompt = self.prompt
        # prompt can have embedded newlines (which are printed)
        sprompt = sprompt.replace(r'\012', '\n')
        sprompt = sprompt.replace(r'\n', '\n')
        fields[6] = sprompt
    # delete trailing null parameters
    for i in [6, 5, 4]:
        if fields[i] != "":
            break
        del fields[i]
    if dolist:
        return fields
    else:
        return ','.join(fields)
def In(sigOrVal, iterable):
    """
    Hdl convertible in operator, check if any of items
    in "iterable" equals "sigOrVal"
    """
    res = None
    for i in iterable:
        i = toHVal(i)
        if res is None:
            res = sigOrVal._eq(i)
        else:
            res = res | sigOrVal._eq(i)

    assert res is not None, "Parameter iterable is empty"
    return res
def set_to_current(self):
    """Set the selection to the currently open one

    :returns: None
    :rtype: None
    :raises: None
    """
    cur = self.get_current_file()
    if cur is not None:
        self.set_selection(cur)
    else:
        self.init_selection()
def run(self):
    """main control loop for thread"""
    while True:
        try:
            cursor = JSON_CLIENT.json_client['local']['oplog.rs'].find(
                {'ts': {'$gt': self.last_timestamp}})
        except TypeError:
            # filesystem, so .json_client is a bool and not iterable
            pass
        else:
            # http://stackoverflow.com/questions/30401063/pymongo-tailing-oplog
            cursor.add_option(2)   # tailable
            cursor.add_option(8)   # oplog_replay
            cursor.add_option(32)  # await data
            self._retry()
            for doc in cursor:
                self.last_timestamp = doc['ts']
                if doc['ns'] in self.receivers:
                    self._run_namespace(doc)
        time.sleep(1)
def evaluate_script(self):
    """
    Evaluates current **Script_Editor_tabWidget** Widget tab Model editor content
    into the interactive console.

    :return: Method success.
    :rtype: bool
    """
    editor = self.get_current_editor()
    if not editor:
        return False

    LOGGER.debug("> Evaluating 'Script Editor' content.")
    if self.evaluate_code(foundations.strings.to_string(editor.toPlainText().toUtf8())):
        self.ui_refresh.emit()
        return True
def set_display_mode(self, zoom, layout='continuous'):
    """Set display mode in viewer

    The "zoom" argument may be 'fullpage', 'fullwidth', 'real',
    'default', or a number, interpreted as a percentage."""
    if zoom in ('fullpage', 'fullwidth', 'real', 'default') or not isinstance(zoom, str):
        self.zoom_mode = zoom
    else:
        self.error('Incorrect zoom display mode: ' + zoom)

    if layout in ('single', 'continuous', 'two', 'default'):
        self.layout_mode = layout
    else:
        self.error('Incorrect layout display mode: ' + layout)
def serialize(self, now=None):
    """
    Serialize this SubSection and all children to lxml Element and return it.

    :param str now: Default value for CREATED if none set
    :return: dmdSec/techMD/rightsMD/sourceMD/digiprovMD Element with all children
    """
    created = self.created if self.created is not None else now
    el = etree.Element(utils.lxmlns("mets") + self.subsection, ID=self.id_string)
    if created:  # Don't add CREATED if none was parsed
        el.set("CREATED", created)
    status = self.get_status()
    if status:
        el.set("STATUS", status)
    if self.contents:
        el.append(self.contents.serialize())
    return el
def vcs_init(self):
    """Initialize VCS repository."""
    VCS(os.path.join(self.outdir, self.name), self.pkg_data)
def reduce_resource_name_to_task(res_name):
    """
    Assuming that the convention of naming resources associated with tasks as
    res[TASK][number], reduces such resource names to just the name of the task.
    This ensures that multiple copies of the same resource are treated the same.

    Resource names of different formats will be left untouched.
    """
    # Reduce resource names to tasks being rewarded
    if res_name[:3].lower() != "res":
        return res_name
    res_name = res_name[3:].lower()
    while res_name[-1].isdigit():
        res_name = res_name[:-1]
    return res_name
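A small usage sketch of the naming convention (the resource names below are hypothetical, not from the original source):

for name in ("resBuildHouse3", "resbuildhouse12", "water"):
    print(reduce_resource_name_to_task(name))
# -> buildhouse, buildhouse, water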
def configure_slack_logger(
        self,
        slack_webhook=None,
        log_level='ERROR',
        log_format=ReportingFormats.SLACK_PRINT.value,
        custom_args=''
):
    """logger for sending messages to Slack.  Easy way to alert humans of issues

    Note:
        Will try to overwrite minimum log level to enable requested log_level
        Will warn and not attach the slack logger if missing webhook key
        Learn more about webhooks: https://api.slack.com/docs/message-attachments

    Args:
        slack_webhook (str): slack bot webhook (full URL)
        log_level (str): desired log level for handle https://docs.python.org/3/library/logging.html#logging-levels
        log_format (str): format for logging messages https://docs.python.org/3/library/logging.html#logrecord-attributes
        custom_args (str): special ID to include in messages
    """
    # Override defaults if required #
    slack_webhook = self.config.get_option(
        'LOGGING', 'slack_webhook',
        None, slack_webhook
    )
    log_level = self.config.get_option(
        'LOGGING', 'slack_level',
        None, log_level
    )

    # Actually build slack logging handler #
    # vv TODO vv: Test review
    slack_handler = HackySlackHandler(
        slack_webhook
    )
    self._configure_common(
        'slack_',
        log_level,
        log_format,
        'Slack',
        slack_handler,
        custom_args=custom_args
    )
def setsebool(boolean, value, persist=False):
    '''
    Set the value for a boolean

    CLI Example:

    .. code-block:: bash

        salt '*' selinux.setsebool virt_use_usb off
    '''
    if persist:
        cmd = 'setsebool -P {0} {1}'.format(boolean, value)
    else:
        cmd = 'setsebool {0} {1}'.format(boolean, value)
    return not __salt__['cmd.retcode'](cmd, python_shell=False)
def _put(self, url, data={}):
    """Wrapper around request.put() to use the API prefix. Returns a JSON response."""
    r = requests.put(self._api_prefix + url,
                     data=json.dumps(data),
                     headers=self.headers,
                     auth=self.auth,
                     allow_redirects=False,
                     )
    return self._action(r)
def create_pie_chart(self, snapshot, filename=''):
    """
    Create a pie chart that depicts the distribution of the allocated memory
    for a given `snapshot`. The chart is saved to `filename`.
    """
    try:
        from pylab import figure, title, pie, axes, savefig
        from pylab import sum as pylab_sum
    except ImportError:
        return self.nopylab_msg % ("pie_chart")

    # Don't bother illustrating a pie without pieces.
    if not snapshot.tracked_total:
        return ''

    classlist = []
    sizelist = []
    for k, v in list(snapshot.classes.items()):
        if v['pct'] > 3.0:
            classlist.append(k)
            sizelist.append(v['sum'])
    sizelist.insert(0, snapshot.asizeof_total - pylab_sum(sizelist))
    classlist.insert(0, 'Other')
    #sizelist = [x*0.01 for x in sizelist]

    title("Snapshot (%s) Memory Distribution" % (snapshot.desc))
    figure(figsize=(8, 8))
    axes([0.1, 0.1, 0.8, 0.8])
    pie(sizelist, labels=classlist)
    savefig(filename, dpi=50)

    return self.chart_tag % (self.relative_path(filename))
def get_repository_form_for_create(self, repository_record_types=None):
    """Gets the repository form for creating new repositories.

    A new form should be requested for each create transaction.

    arg:    repository_record_types (osid.type.Type[]): array of
            repository record types
    return: (osid.repository.RepositoryForm) - the repository form
    raise:  NullArgument - ``repository_record_types`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    raise:  Unsupported - unable to get form for requested record types
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from awsosid template for -
    # osid.resource.BinAdminSession.get_bin_form_for_create_template
    if not self._can('create'):
        raise PermissionDenied()
    else:
        return self._provider_session.get_repository_form_for_create(repository_record_types)
def find_bounding_indices(arr, values, axis, from_below=True):
    """Find the indices surrounding the values within arr along axis.

    Returns a set of above, below, good. Above and below are lists of arrays of indices.
    These lists are formulated such that they can be used directly to index into a numpy
    array and get the expected results (no extra slices or ellipsis necessary). `good` is
    a boolean array indicating the "columns" that actually had values to bound the desired
    value(s).

    Parameters
    ----------
    arr : array-like
        Array to search for values
    values: array-like
        One or more values to search for in `arr`
    axis : int
        The dimension of `arr` along which to search.
    from_below : bool, optional
        Whether to search from "below" (i.e. low indices to high indices). If `False`,
        the search will instead proceed from high indices to low indices. Defaults to `True`.

    Returns
    -------
    above : list of arrays
        List of broadcasted indices to the location above the desired value
    below : list of arrays
        List of broadcasted indices to the location below the desired value
    good : array
        Boolean array indicating where the search found proper bounds for the desired value
    """
    # The shape of generated indices is the same as the input, but with the axis of interest
    # replaced by the number of values to search for.
    indices_shape = list(arr.shape)
    indices_shape[axis] = len(values)

    # Storage for the found indices and the mask for good locations
    indices = np.empty(indices_shape, dtype=int)
    good = np.empty(indices_shape, dtype=bool)

    # Used to put the output in the proper location
    store_slice = [slice(None)] * arr.ndim

    # Loop over all of the values and for each, see where the value would be found from a
    # linear search
    for level_index, value in enumerate(values):
        # Look for changes in the value of the test for <= value in consecutive points
        # Taking abs() because we only care if there is a flip, not which direction.
        switches = np.abs(np.diff((arr <= value).astype(int), axis=axis))

        # Good points are those where it's not just 0's along the whole axis
        good_search = np.any(switches, axis=axis)

        if from_below:
            # Look for the first switch; need to add 1 to the index since argmax is giving the
            # index within the difference array, which is one smaller.
            index = switches.argmax(axis=axis) + 1
        else:
            # Generate a list of slices to reverse the axis of interest so that searching from
            # 0 to N is starting at the "top" of the axis.
            arr_slice = [slice(None)] * arr.ndim
            arr_slice[axis] = slice(None, None, -1)

            # Same as above, but we use the slice to come from the end; then adjust those
            # indices to measure from the front.
            index = arr.shape[axis] - 1 - switches[tuple(arr_slice)].argmax(axis=axis)

        # Set all indices where the results are not good to 0
        index[~good_search] = 0

        # Put the results in the proper slice
        store_slice[axis] = level_index
        indices[tuple(store_slice)] = index
        good[tuple(store_slice)] = good_search

    # Create index values for broadcasting arrays
    above = broadcast_indices(arr, indices, arr.ndim, axis)
    below = broadcast_indices(arr, indices - 1, arr.ndim, axis)

    return above, below, good
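The core switches/argmax trick can be seen on a single 1-D column; a minimal standalone sketch (the values are made up, and this does not call the function above):

import numpy as np

pressure = np.array([1000., 925., 850., 700., 500.])   # decreasing along the axis
value = 800.

# Find where the test (arr <= value) flips between consecutive points.
switches = np.abs(np.diff((pressure <= value).astype(int)))
above_idx = switches.argmax() + 1      # first index past the flip
below_idx = above_idx - 1
print(pressure[below_idx], pressure[above_idx])        # 850.0 700.0 bound the value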
def nla_put_u8(msg, attrtype, value):
    """Add 8 bit integer attribute to Netlink message.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/attr.c#L563

    Positional arguments:
    msg -- Netlink message (nl_msg class instance).
    attrtype -- attribute type (integer).
    value -- numeric value to store as payload (int() or c_uint8()).

    Returns:
    0 on success or a negative error code.
    """
    data = bytearray(value if isinstance(value, c_uint8) else c_uint8(value))
    return nla_put(msg, attrtype, SIZEOF_U8, data)
def score_url(self, url):
    """
    Give an url a score which can be used to choose preferred URLs
    for a given project release.
    """
    t = urlparse(url)
    return (t.scheme != 'https', 'pypi.python.org' in t.netloc,
            posixpath.basename(t.path))
def cmd_rally_add(self, args):
    '''handle rally add'''
    if len(args) < 1:
        alt = self.settings.rallyalt
    else:
        alt = float(args[0])

    if len(args) < 2:
        break_alt = self.settings.rally_breakalt
    else:
        break_alt = float(args[1])

    if len(args) < 3:
        flag = self.settings.rally_flags
    else:
        flag = int(args[2])
        # currently only supporting autoland values:
        # True (nonzero) and False (zero)
        if flag != 0:
            flag = 2

    if not self.have_list:
        print("Please list rally points first")
        return

    if self.rallyloader.rally_count() > 4:
        print("Only 5 rally points possible per flight plan.")
        return

    try:
        latlon = self.module('map').click_position
    except Exception:
        print("No map available")
        return
    if latlon is None:
        print("No map click position available")
        return

    land_hdg = 0.0

    self.rallyloader.create_and_append_rally_point(latlon[0] * 1e7, latlon[1] * 1e7,
                                                   alt, break_alt, land_hdg, flag)
    self.send_rally_points()
    print("Added Rally point at %s %f %f, autoland: %s" %
          (str(latlon), alt, break_alt, bool(flag & 2)))
def is_all_Ns(self, start=0, end=None):
    '''Returns true if the sequence is all Ns (upper or lower case)'''
    if end is not None:
        if start > end:
            raise Error('Error in is_all_Ns. Start coord must be <= end coord')
        end += 1
    else:
        end = len(self)

    if len(self) == 0:
        return False
    else:
        return re.search('[^Nn]', self.seq[start:end]) is None
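The underlying check is just a regex over the slice; a standalone sketch on plain strings (not part of the original class):

import re

def _all_Ns(s):
    # Same test as above: true only if no character other than N/n is present.
    return bool(s) and re.search('[^Nn]', s) is None

print(_all_Ns('NNNnnN'))   # True
print(_all_Ns('NNaNN'))    # False
print(_all_Ns(''))         # False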
def printPi(self):
    """
    Prints all states and their steady state probabilities.
    Not recommended for large state spaces.
    """
    assert self.pi is not None, "Calculate pi before calling printPi()"
    assert len(self.mapping) > 0, "printPi() can only be used in combination with the direct or indirect method. Use print(mc.pi) if your subclass is called mc."
    for key, state in self.mapping.items():
        print(state, self.pi[key])
def add_member(self, member, dn=False): """Add a member to the bound group Arguments: member -- the CSHMember object (or distinguished name) of the member Keyword arguments: dn -- whether or not member is a distinguished name """ if dn: if self.check_member(member, dn=True): return mod = (ldap.MOD_ADD, 'member', member.encode('ascii')) else: if self.check_member(member): return mod = (ldap.MOD_ADD, 'member', member.get_dn().encode('ascii')) if self.__lib__.__batch_mods__: self.__lib__.enqueue_mod(self.__dn__, mod) elif not self.__lib__.__ro__: mod_attrs = [mod] self.__con__.modify_s(self.__dn__, mod_attrs) else: print("ADD VALUE member = {} FOR {}".format(mod[2], self.__dn__))
Add a member to the bound group Arguments: member -- the CSHMember object (or distinguished name) of the member Keyword arguments: dn -- whether or not member is a distinguished name
def restore_renamed_serializable_attributes(self): """Hook for the future if attributes have been renamed. The old attribute names will have been restored in the __dict__.update in __setstate__, so this routine should move attribute values to their new names. """ if hasattr(self, 'start_addr'): self.origin = self.start_addr log.debug(f"moving start_addr to origin: {self.start_addr}") delattr(self, 'start_addr')
Hook for the future if attributes have been renamed. The old attribute names will have been restored in the __dict__.update in __setstate__, so this routine should move attribute values to their new names.
def cmdline_params(self, surface_sample_file_name, cell_file_name): """Synthesize command line parameters e.g. [ ['struct.vsa'], ['struct.cell']] """ parameters = [] parameters += [surface_sample_file_name] parameters += [cell_file_name] parameters += [self._OUTPUT_FILE_NAME] return map(str, parameters)
Synthesize command line parameters e.g. [ ['struct.vsa'], ['struct.cell']]
def submit(self): """Submit this torrent and create a new task""" if self.api._req_lixian_add_task_bt(self): self.submitted = True return True return False
Submit this torrent and create a new task
def parse(input_string, prefix=''): """Parses the given DSL string and returns parsed results. Args: input_string (str): DSL string prefix (str): Optional prefix to add to every element name, useful to namespace things Returns: dict: Parsed content """ tree = parser.parse(input_string) visitor = ChatlVisitor(prefix) visit_parse_tree(tree, visitor) return visitor.parsed
Parses the given DSL string and returns parsed results. Args: input_string (str): DSL string prefix (str): Optional prefix to add to every element name, useful to namespace things Returns: dict: Parsed content
def attribute(self, attribute_id, action='GET', params=None): """ Gets the attribute from a Group/Indicator or Victim Args: action: params: attribute_id: Returns: attribute json """ if params is None: params = {} if not self.can_update(): self._tcex.handle_error(910, [self.type]) if action == 'GET': return self.tc_requests.get_attribute( self.api_type, self.api_sub_type, self.unique_id, attribute_id, owner=self.owner, params=params, ) if action == 'DELETE': return self.tc_requests.delete_attribute( self.api_type, self.api_sub_type, self.unique_id, attribute_id, owner=self.owner ) self._tcex.handle_error(925, ['action', 'attribute', 'action', 'action', action]) return None
Gets the attribute from a Group/Indicator or Victim Args: action: params: attribute_id: Returns: attribute json
def subsample_snps_map(seqchunk, nmask, maparr):
    """
    Removes columns containing Ns from the SNP array prior to matrix
    calculation, and subsamples 'linked' SNPs (those from the same RAD
    locus) so that for these four samples only one SNP per locus is kept.
    The locus information comes from the 'map' array (map file).
    """
    ## mask columns that contain Ns
    rmask = np.zeros(seqchunk.shape[1], dtype=np.bool_)

    ## apply mask to the mapfile
    last_loc = -1
    for idx in xrange(maparr.shape[0]):
        if maparr[idx] != last_loc:
            if not nmask[idx]:
                rmask[idx] = True
            last_loc = maparr[idx]

    ## apply mask
    #newarr = seqchunk[:, rmask]

    ## return smaller Nmasked array
    return rmask
Removes columns containing Ns from the SNP array prior to matrix calculation, and subsamples 'linked' SNPs (those from the same RAD locus) so that for these four samples only one SNP per locus is kept. The locus information comes from the 'map' array (map file).
def _handle_raw_book(self, dtype, data, ts): """Updates the raw order books stored in self.raw_books[chan_id]. :param dtype: :param data: :param ts: :return: """ self.log.debug("_handle_raw_book: %s - %s - %s", dtype, data, ts) channel_id, *data = data channel_identifier = self.channel_directory[channel_id] entry = (data, ts) self.raw_books[channel_identifier].put(entry)
Updates the raw order books stored in self.raw_books[chan_id]. :param dtype: :param data: :param ts: :return:
def addIDs(self, asfield=False): """ Generate point and cell ids. :param bool asfield: flag to control whether to generate scalar or field data. """ ids = vtk.vtkIdFilter() ids.SetInputData(self.poly) ids.PointIdsOn() ids.CellIdsOn() if asfield: ids.FieldDataOn() else: ids.FieldDataOff() ids.Update() return self.updateMesh(ids.GetOutput())
Generate point and cell ids. :param bool asfield: flag to control whether to generate scalar or field data.
def is_module_installed(module_name, version=None, installed_version=None, interpreter=None): """ Return True if module *module_name* is installed If version is not None, checking module version (module must have an attribute named '__version__') version may starts with =, >=, > or < to specify the exact requirement ; multiple conditions may be separated by ';' (e.g. '>=0.13;<1.0') interpreter: check if a module is installed with a given version in a determined interpreter """ if interpreter: if osp.isfile(interpreter) and ('python' in interpreter): checkver = inspect.getsource(check_version) get_modver = inspect.getsource(get_module_version) stable_ver = inspect.getsource(is_stable_version) ismod_inst = inspect.getsource(is_module_installed) f = tempfile.NamedTemporaryFile('wt', suffix='.py', dir=get_temp_dir(), delete=False) try: script = f.name f.write("# -*- coding: utf-8 -*-" + "\n\n") f.write("from distutils.version import LooseVersion" + "\n") f.write("import re" + "\n\n") f.write(stable_ver + "\n") f.write(checkver + "\n") f.write(get_modver + "\n") f.write(ismod_inst + "\n") if version: f.write("print(is_module_installed('%s','%s'))"\ % (module_name, version)) else: f.write("print(is_module_installed('%s'))" % module_name) # We need to flush and sync changes to ensure that the content # of the file is in disk before running the script f.flush() os.fsync(f) f.close() try: proc = run_program(interpreter, [script]) output, _err = proc.communicate() except subprocess.CalledProcessError: return True return eval(output.decode()) finally: if not f.closed: f.close() os.remove(script) else: # Try to not take a wrong decision if there is no interpreter # available (needed for the change_pystartup method of ExtConsole # config page) return True else: if installed_version is None: try: actver = get_module_version(module_name) except: # Module is not installed return False else: actver = installed_version if actver is None and version is not None: return False elif version is None: return True else: if ';' in version: output = True for ver in version.split(';'): output = output and is_module_installed(module_name, ver) return output match = re.search(r'[0-9]', version) assert match is not None, "Invalid version number" symb = version[:match.start()] if not symb: symb = '=' assert symb in ('>=', '>', '=', '<', '<='),\ "Invalid version condition '%s'" % symb version = version[match.start():] return check_version(actver, version, symb)
Return True if module *module_name* is installed If version is not None, checking module version (module must have an attribute named '__version__') version may starts with =, >=, > or < to specify the exact requirement ; multiple conditions may be separated by ';' (e.g. '>=0.13;<1.0') interpreter: check if a module is installed with a given version in a determined interpreter
def __exists_row_not_too_old(self, row):
    """ Check if the given row exists and is not too old """
    if row is None:
        return False
    record_time = dateutil.parser.parse(row[2])
    now = datetime.datetime.now(dateutil.tz.gettz())
    # Age is "now minus record time"; the reversed subtraction would give a
    # negative value for past records and never trip the max_age check.
    age = (now - record_time).total_seconds()
    if age > self.max_age:
        return False
    return True
Check if the given row exists and is not too old
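The corrected age computation is easy to verify in isolation; the timestamp below is made up:

import datetime
import dateutil.parser
import dateutil.tz

now = datetime.datetime.now(dateutil.tz.gettz())
record_time = dateutil.parser.parse('2020-01-01T00:00:00+00:00')
age = (now - record_time).total_seconds()
print(age > 0)   # True: a record in the past has a positive age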
def cmap_from_text(filename, norm=False, transparency=False, hex=False):
    '''
    cmap_from_text reads a colormap from a text file whose lines each contain
    3 values in the range [0,255], or [00,FF] when hex is True, and returns a
    colormap built from those colours.
    If transparency is True, a transparency value is generated for each colour
    from its index in the file (cat) and the total number of colours (tot),
    so each colour becomes a tuple of length 4.
    If norm is set to True, the input values are normalized between 0 and 1.
    '''
    lines = [line.rstrip('\n') for line in open(filename)]
    _colors = []
    _tot = len(lines)
    _index = 1
    for i in lines:
        if transparency:
            _colors.append(_text_to_rgb(i, norm=norm, cat=_index, tot=_tot, hex=hex))
        else:
            _colors.append(_text_to_rgb(i, norm=norm, hex=hex))
        _index = _index + 1
    return _make_cmap(_colors)
cmap_from_text reads a colormap from a text file whose lines each contain 3 values in the range [0,255], or [00,FF] when hex is True, and returns a colormap built from those colours. If transparency is True, a transparency value is generated for each colour from its index in the file (cat) and the total number of colours (tot), so each colour becomes a tuple of length 4. If norm is set to True, the input values are normalized between 0 and 1.
def unpublish_view(self, request, object_id): """ Instantiates a class-based view that redirects to Wagtail's 'unpublish' view for models that extend 'Page' (if the user has sufficient permissions). We do this via our own view so that we can reliably control redirection of the user back to the index_view once the action is completed. The view class used can be overridden by changing the 'unpublish_view_class' attribute. """ kwargs = {'model_admin': self, 'object_id': object_id} view_class = self.unpublish_view_class return view_class.as_view(**kwargs)(request)
Instantiates a class-based view that redirects to Wagtail's 'unpublish' view for models that extend 'Page' (if the user has sufficient permissions). We do this via our own view so that we can reliably control redirection of the user back to the index_view once the action is completed. The view class used can be overridden by changing the 'unpublish_view_class' attribute.
def compute_diffusion_maps(lapl_type, diffusion_map, lambdas, diffusion_time): """ Credit to Satrajit Ghosh (http://satra.cogitatum.org/) for final steps """ # Check that diffusion maps is using the correct laplacian, warn otherwise if lapl_type not in ['geometric', 'renormalized']: warnings.warn("for correct diffusion maps embedding use laplacian type 'geometric' or 'renormalized'.") # Step 5 of diffusion maps: vectors = diffusion_map.copy() psi = vectors/vectors[:,[0]] diffusion_times = diffusion_time if diffusion_time == 0: lambdas = np.abs(lambdas) diffusion_times = np.exp(1. - np.log(1 - lambdas[1:])/np.log(lambdas[1:])) lambdas = lambdas / (1 - lambdas) else: lambdas = np.abs(lambdas) lambdas = lambdas ** float(diffusion_time) diffusion_map = psi * lambdas return diffusion_map
Credit to Satrajit Ghosh (http://satra.cogitatum.org/) for final steps
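The scaling step for a fixed diffusion time can be checked on toy eigendata (random numbers, purely to illustrate the shapes involved):

import numpy as np

rng = np.random.default_rng(0)
vectors = rng.normal(size=(5, 3))      # 5 samples, 3 eigenvectors
lambdas = np.array([1.0, 0.6, 0.3])    # leading eigenvalue of the diffusion operator
diffusion_time = 2

psi = vectors / vectors[:, [0]]        # normalize by the trivial first eigenvector
weights = np.abs(lambdas) ** float(diffusion_time)
embedding = psi * weights              # broadcasts the per-eigenvector weights
print(embedding.shape)                 # (5, 3)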
def captured_output(stream_name): """Return a context manager used by captured_stdout/stdin/stderr that temporarily replaces the sys stream *stream_name* with a StringIO. Taken from Lib/support/__init__.py in the CPython repo. """ orig_stdout = getattr(sys, stream_name) setattr(sys, stream_name, StreamWrapper.from_stream(orig_stdout)) try: yield getattr(sys, stream_name) finally: setattr(sys, stream_name, orig_stdout)
Return a context manager used by captured_stdout/stdin/stderr that temporarily replaces the sys stream *stream_name* with a StringIO. Taken from Lib/support/__init__.py in the CPython repo.
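In the original module this generator is presumably wrapped with contextlib.contextmanager (it yields rather than returns); a self-contained equivalent using io.StringIO shows the intended usage:

import contextlib
import io
import sys

@contextlib.contextmanager
def captured_stdout():
    # Swap sys.stdout for a StringIO for the duration of the with-block.
    orig = sys.stdout
    sys.stdout = io.StringIO()
    try:
        yield sys.stdout
    finally:
        sys.stdout = orig

with captured_stdout() as out:
    print("hello")
print(out.getvalue().strip())   # -> hello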
def _transform_in(self): """Return array of coordinates that can be mapped by Transform classes.""" return np.array([ [self.left, self.bottom, 0, 1], [self.right, self.top, 0, 1]])
Return array of coordinates that can be mapped by Transform classes.
def get_tetrahedra_integration_weight(omegas,
                                      tetrahedra_omegas,
                                      function='I'):
    """Returns integration weights

    Parameters
    ----------
    omegas : float or list of float values
        Energy(s) at which the integration weight(s) are computed.
    tetrahedra_omegas : ndarray or list of list
        Energies at vertices of 24 tetrahedra
        shape=(24, 4)
        dtype='double'
    function : str, 'I' or 'J'
        'J' is for integration and 'I' is for its derivative.

    """
    if isinstance(omegas, float):
        return phonoc.tetrahedra_integration_weight(
            omegas,
            np.array(tetrahedra_omegas, dtype='double', order='C'),
            function)
    else:
        integration_weights = np.zeros(len(omegas), dtype='double')
        phonoc.tetrahedra_integration_weight_at_omegas(
            integration_weights,
            np.array(omegas, dtype='double'),
            np.array(tetrahedra_omegas, dtype='double', order='C'),
            function)
        return integration_weights
Returns integration weights

Parameters
----------
omegas : float or list of float values
    Energy(s) at which the integration weight(s) are computed.
tetrahedra_omegas : ndarray or list of list
    Energies at vertices of 24 tetrahedra
    shape=(24, 4)
    dtype='double'
function : str, 'I' or 'J'
    'J' is for integration and 'I' is for its derivative.
def new_instance(settings): """ MAKE A PYTHON INSTANCE `settings` HAS ALL THE `kwargs`, PLUS `class` ATTRIBUTE TO INDICATE THE CLASS TO CREATE """ settings = set_default({}, settings) if not settings["class"]: Log.error("Expecting 'class' attribute with fully qualified class name") # IMPORT MODULE FOR HANDLER path = settings["class"].split(".") class_name = path[-1] path = ".".join(path[:-1]) constructor = None try: temp = __import__(path, globals(), locals(), [class_name], 0) constructor = object.__getattribute__(temp, class_name) except Exception as e: Log.error("Can not find class {{class}}", {"class": path}, cause=e) settings['class'] = None try: return constructor(kwargs=settings) # MAYBE IT TAKES A KWARGS OBJECT except Exception as e: pass try: return constructor(**settings) except Exception as e: Log.error("Can not create instance of {{name}}", name=".".join(path), cause=e)
MAKE A PYTHON INSTANCE `settings` HAS ALL THE `kwargs`, PLUS `class` ATTRIBUTE TO INDICATE THE CLASS TO CREATE
def LSTM(nO, nI): """Create an LSTM layer. Args: number out, number in""" weights = LSTM_weights(nO, nI) gates = LSTM_gates(weights.ops) return Recurrent(RNN_step(weights, gates))
Create an LSTM layer. Args: number out, number in
def management(self):
    """Returns a management service client"""
    endpoint = self._instance.get_endpoint_for_service_type(
        "management",
        region_name=self._instance._region_name,
    )
    token = self._instance.auth.get_token(self._instance.session)
    self._management = tuskar_client.get_client(
        2, os_auth_token=token, tuskar_url=endpoint)
    return self._management
Returns a management service client
def from_json(cls, path, fatal=True, logger=None): """ :param str path: Path to json file :param bool|None fatal: Abort execution on failure if True :param callable|None logger: Logger to use :return: Deserialized object """ result = cls() result.load(path, fatal=fatal, logger=logger) return result
:param str path: Path to json file :param bool|None fatal: Abort execution on failure if True :param callable|None logger: Logger to use :return: Deserialized object
def getSupportedProtocols(self):
    """Returns a dictionary of supported protocols."""
    protocols = {}
    for td in self.TD:
        if td is not None:
            strprotocol = "T=%d" % (td & 0x0F)
            protocols[strprotocol] = True
    if not self.hasTD[0]:
        protocols['T=0'] = True
    return protocols
Returns a dictionary of supported protocols.
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'transcript') and self.transcript is not None: _dict['transcript'] = self.transcript if hasattr(self, 'confidence') and self.confidence is not None: _dict['confidence'] = self.confidence if hasattr(self, 'timestamps') and self.timestamps is not None: _dict['timestamps'] = self.timestamps if hasattr(self, 'word_confidence') and self.word_confidence is not None: _dict['word_confidence'] = self.word_confidence return _dict
Return a json dictionary representing this model.
def lang(self):
    """ Language this text is in

    :return: Language of the text, as a string
    """
    return str(self.graph.value(self.asNode(), DC.language))
Language this text is in

:return: Language of the text, as a string
def gen_postinits(self, cls: ClassDefinition) -> str: """ Generate all the typing and existence checks post initialize """ post_inits = [] if not cls.abstract: pkeys = self.primary_keys_for(cls) for pkey in pkeys: post_inits.append(self.gen_postinit(cls, pkey)) for slotname in cls.slots: slot = self.schema.slots[slotname] if not (slot.primary_key or slot.identifier): post_inits.append(self.gen_postinit(cls, slotname)) post_inits_line = '\n\t\t'.join([p for p in post_inits if p]) return (f''' def _fix_elements(self): super()._fix_elements() {post_inits_line}''' + '\n') if post_inits_line else ''
Generate all the typing and existence checks post initialize
def makeLys(segID, N, CA, C, O, geo): '''Creates a Lysine residue''' ##R-Group CA_CB_length=geo.CA_CB_length C_CA_CB_angle=geo.C_CA_CB_angle N_C_CA_CB_diangle=geo.N_C_CA_CB_diangle CB_CG_length=geo.CB_CG_length CA_CB_CG_angle=geo.CA_CB_CG_angle N_CA_CB_CG_diangle=geo.N_CA_CB_CG_diangle CG_CD_length=geo.CG_CD_length CB_CG_CD_angle=geo.CB_CG_CD_angle CA_CB_CG_CD_diangle=geo.CA_CB_CG_CD_diangle CD_CE_length=geo.CD_CE_length CG_CD_CE_angle=geo.CG_CD_CE_angle CB_CG_CD_CE_diangle=geo.CB_CG_CD_CE_diangle CE_NZ_length=geo.CE_NZ_length CD_CE_NZ_angle=geo.CD_CE_NZ_angle CG_CD_CE_NZ_diangle=geo.CG_CD_CE_NZ_diangle carbon_b= calculateCoordinates(N, C, CA, CA_CB_length, C_CA_CB_angle, N_C_CA_CB_diangle) CB= Atom("CB", carbon_b, 0.0 , 1.0, " "," CB", 0,"C") carbon_g= calculateCoordinates(N, CA, CB, CB_CG_length, CA_CB_CG_angle, N_CA_CB_CG_diangle) CG= Atom("CG", carbon_g, 0.0, 1.0, " ", " CG", 0, "C") carbon_d= calculateCoordinates(CA, CB, CG, CG_CD_length, CB_CG_CD_angle, CA_CB_CG_CD_diangle) CD= Atom("CD", carbon_d, 0.0, 1.0, " ", " CD", 0, "C") carbon_e= calculateCoordinates(CB, CG, CD, CD_CE_length, CG_CD_CE_angle, CB_CG_CD_CE_diangle) CE= Atom("CE", carbon_e, 0.0, 1.0, " ", " CE", 0, "C") nitrogen_z= calculateCoordinates(CG, CD, CE, CE_NZ_length, CD_CE_NZ_angle, CG_CD_CE_NZ_diangle) NZ= Atom("NZ", nitrogen_z, 0.0, 1.0, " ", " NZ", 0, "N") ##Create Residue Data Structure res= Residue((' ', segID, ' '), "LYS", ' ') res.add(N) res.add(CA) res.add(C) res.add(O) res.add(CB) res.add(CG) res.add(CD) res.add(CE) res.add(NZ) return res
Creates a Lysine residue
def get_mv_impedance(grid): """ Determine MV grid impedance (resistance and reactance separately) Parameters ---------- grid : LVGridDing0 Returns ------- :any:`list` List containing resistance and reactance of MV grid """ omega = 2 * math.pi * 50 mv_grid = grid.grid_district.lv_load_area.mv_grid_district.mv_grid edges = mv_grid.find_path(grid._station, mv_grid._station, type='edges') r_mv_grid = sum([e[2]['branch'].type['R'] * e[2]['branch'].length / 1e3 for e in edges]) x_mv_grid = sum([e[2]['branch'].type['L'] / 1e3 * omega * e[2][ 'branch'].length / 1e3 for e in edges]) return [r_mv_grid, x_mv_grid]
Determine MV grid impedance (resistance and reactance separately) Parameters ---------- grid : LVGridDing0 Returns ------- :any:`list` List containing resistance and reactance of MV grid
def retire(did):
    """Retire metadata of an asset
    ---
    tags:
      - ddo
    parameters:
      - name: did
        in: path
        description: DID of the asset.
        required: true
        type: string
    responses:
      200:
        description: successfully deleted
      404:
        description: This asset DID is not in OceanDB
      500:
        description: Error
    """
    try:
        if dao.get(did) is None:
            return 'This asset DID is not in OceanDB', 404
        else:
            dao.delete(did)
            return 'Successfully deleted', 200
    except Exception as err:
        return f'Some error: {str(err)}', 500
Retire metadata of an asset --- tags: - ddo parameters: - name: did in: path description: DID of the asset. required: true type: string responses: 200: description: successfully deleted 404: description: This asset DID is not in OceanDB 500: description: Error
def update_from_stripe_data(self, stripe_coupon, exclude_fields=None, commit=True): """ Update StripeCoupon object with data from stripe.Coupon without calling stripe.Coupon.retrieve. To only update the object, set the commit param to False. Returns the number of rows altered or None if commit is False. """ fields_to_update = self.STRIPE_FIELDS - set(exclude_fields or []) update_data = {key: stripe_coupon[key] for key in fields_to_update} for field in ["created", "redeem_by"]: if update_data.get(field): update_data[field] = timestamp_to_timezone_aware_date(update_data[field]) if update_data.get("amount_off"): update_data["amount_off"] = Decimal(update_data["amount_off"]) / 100 # also make sure the object is up to date (without the need to call database) for key, value in six.iteritems(update_data): setattr(self, key, value) if commit: return StripeCoupon.objects.filter(pk=self.pk).update(**update_data)
Update StripeCoupon object with data from stripe.Coupon without calling stripe.Coupon.retrieve. To only update the object, set the commit param to False. Returns the number of rows altered or None if commit is False.
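The two value conversions performed there (cents to a Decimal amount, and Unix timestamps to timezone-aware datetimes) can be sketched with the standard library; the numbers are made up and the helper names used in the method are the project's own:

from decimal import Decimal
from datetime import datetime, timezone

amount_off = 1500                      # Stripe reports amounts in cents
print(Decimal(amount_off) / 100)       # Decimal('15')

redeem_by = 1609459200                 # a Unix timestamp from the Stripe API
print(datetime.fromtimestamp(redeem_by, tz=timezone.utc))   # 2021-01-01 00:00:00+00:00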
def postorder(self): """Return the nodes in the binary tree using post-order_ traversal. A post-order_ traversal visits left subtree, right subtree, then root. .. _post-order: https://en.wikipedia.org/wiki/Tree_traversal :return: List of nodes. :rtype: [binarytree.Node] **Example**: .. doctest:: >>> from binarytree import Node >>> >>> root = Node(1) >>> root.left = Node(2) >>> root.right = Node(3) >>> root.left.left = Node(4) >>> root.left.right = Node(5) >>> >>> print(root) <BLANKLINE> __1 / \\ 2 3 / \\ 4 5 <BLANKLINE> >>> root.postorder [Node(4), Node(5), Node(2), Node(3), Node(1)] """ node_stack = [] result = [] node = self while True: while node is not None: if node.right is not None: node_stack.append(node.right) node_stack.append(node) node = node.left node = node_stack.pop() if (node.right is not None and len(node_stack) > 0 and node_stack[-1] is node.right): node_stack.pop() node_stack.append(node) node = node.right else: result.append(node) node = None if len(node_stack) == 0: break return result
Return the nodes in the binary tree using post-order_ traversal. A post-order_ traversal visits left subtree, right subtree, then root. .. _post-order: https://en.wikipedia.org/wiki/Tree_traversal :return: List of nodes. :rtype: [binarytree.Node] **Example**: .. doctest:: >>> from binarytree import Node >>> >>> root = Node(1) >>> root.left = Node(2) >>> root.right = Node(3) >>> root.left.left = Node(4) >>> root.left.right = Node(5) >>> >>> print(root) <BLANKLINE> __1 / \\ 2 3 / \\ 4 5 <BLANKLINE> >>> root.postorder [Node(4), Node(5), Node(2), Node(3), Node(1)]
def __select_nearest_ws(jsondata, latitude, longitude): """Select the nearest weatherstation.""" log.debug("__select_nearest_ws: latitude: %s, longitude: %s", latitude, longitude) dist = 0 dist2 = 0 loc_data = None try: ws_json = jsondata[__ACTUAL] ws_json = ws_json[__STATIONMEASUREMENTS] except (KeyError, TypeError): log.warning("Missing section in Buienradar xmldata (%s)." "Can happen 00:00-01:00 CE(S)T", __STATIONMEASUREMENTS) return None for wstation in ws_json: dist2 = __get_ws_distance(wstation, latitude, longitude) if dist2 is not None: if ((loc_data is None) or (dist2 < dist)): dist = dist2 loc_data = wstation if loc_data is None: log.warning("No weatherstation selected; aborting...") return None else: try: log.debug("Selected weatherstation: code='%s', " "name='%s', lat='%s', lon='%s'.", loc_data[__STATIONID], loc_data[__STATIONNAME], loc_data[__LAT], loc_data[__LON]) except KeyError: log.debug("Selected weatherstation") return loc_data
Select the nearest weatherstation.
def rt_subscription_running(self): """Is real time subscription running.""" return ( self._tibber_control.sub_manager is not None and self._tibber_control.sub_manager.is_running and self._subscription_id is not None )
Is real time subscription running.
def get_config(self): """Returns initializer configuration as a JSON-serializable dict.""" return { 'initializers': [ tf.compat.v2.initializers.serialize( tf.keras.initializers.get(init)) for init in self.initializers ], 'sizes': self.sizes, 'validate_args': self.validate_args, }
Returns initializer configuration as a JSON-serializable dict.
def modify_classes(): """ Auto-discover INSTALLED_APPS class_modifiers.py modules and fail silently when not present. This forces an import on them to modify any classes they may want. """ import copy from django.conf import settings from django.contrib.admin.sites import site from django.utils.importlib import import_module from django.utils.module_loading import module_has_submodule for app in settings.INSTALLED_APPS: mod = import_module(app) # Attempt to import the app's class_modifier module. try: before_import_registry = copy.copy(site._registry) import_module('%s.class_modifiers' % app) except: site._registry = before_import_registry # Decide whether to bubble up this error. If the app just # doesn't have an class_modifier module, we can ignore the error # attempting to import it, otherwise we want it to bubble up. if module_has_submodule(mod, 'class_modifiers'): raise
Auto-discover INSTALLED_APPS class_modifiers.py modules and fail silently when not present. This forces an import on them to modify any classes they may want.
def _sync_to_group(self, device): '''Sync the device to the cluster group :param device: bigip object -- device to sync to group ''' config_sync_cmd = 'config-sync to-group %s' % self.name device.tm.cm.exec_cmd('run', utilCmdArgs=config_sync_cmd)
Sync the device to the cluster group :param device: bigip object -- device to sync to group
def associate(op, args): """Given an associative op, return an expression with the same meaning as Expr(op, *args), but flattened -- that is, with nested instances of the same op promoted to the top level. >>> associate('&', [(A&B),(B|C),(B&C)]) (A & B & (B | C) & B & C) >>> associate('|', [A|(B|(C|(A&B)))]) (A | B | C | (A & B)) """ args = dissociate(op, args) if len(args) == 0: return _op_identity[op] elif len(args) == 1: return args[0] else: return Expr(op, *args)
Given an associative op, return an expression with the same meaning as Expr(op, *args), but flattened -- that is, with nested instances of the same op promoted to the top level. >>> associate('&', [(A&B),(B|C),(B&C)]) (A & B & (B | C) & B & C) >>> associate('|', [A|(B|(C|(A&B)))]) (A | B | C | (A & B))
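The flattening that `dissociate` provides can be sketched on plain nested tuples; this is only an illustration of the idea, not the library's Expr-based helper:

def flatten_op(op, args):
    # Promote nested (op, ...) tuples to one flat argument list, recursively.
    result = []
    for a in args:
        if isinstance(a, tuple) and a and a[0] == op:
            result.extend(flatten_op(op, a[1:]))
        else:
            result.append(a)
    return result

print(flatten_op('&', [('&', 'A', 'B'), ('|', 'B', 'C'), ('&', 'B', 'C')]))
# ['A', 'B', ('|', 'B', 'C'), 'B', 'C']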
def version(): ''' Return server version from znc --version CLI Example: .. code-block:: bash salt '*' znc.version ''' cmd = ['znc', '--version'] out = __salt__['cmd.run'](cmd, python_shell=False).splitlines() ret = out[0].split(' - ') return ret[0]
Return server version from znc --version CLI Example: .. code-block:: bash salt '*' znc.version
def bowtie_general_stats_table(self): """ Take the parsed stats from the Bowtie report and add it to the basic stats table at the top of the report """ headers = OrderedDict() headers['reads_aligned_percentage'] = { 'title': '% Aligned', 'description': '% reads with at least one reported alignment', 'max': 100, 'min': 0, 'suffix': '%', 'scale': 'YlGn' } headers['reads_aligned'] = { 'title': '{} Aligned'.format(config.read_count_prefix), 'description': 'reads with at least one reported alignment ({})'.format(config.read_count_desc), 'min': 0, 'scale': 'PuRd', 'modify': lambda x: x * config.read_count_multiplier, 'shared_key': 'read_count' } self.general_stats_addcols(self.bowtie_data, headers)
Take the parsed stats from the Bowtie report and add it to the basic stats table at the top of the report
def show_yticklabels_for_all(self, row_column_list=None): """Show the y-axis tick labels for all specified subplots. :param row_column_list: a list containing (row, column) tuples to specify the subplots, or None to indicate *all* subplots. :type row_column_list: list or None """ if row_column_list is None: for subplot in self.subplots: subplot.show_yticklabels() else: for row, column in row_column_list: self.show_yticklabels(row, column)
Show the y-axis tick labels for all specified subplots. :param row_column_list: a list containing (row, column) tuples to specify the subplots, or None to indicate *all* subplots. :type row_column_list: list or None
def _get_pkey(self): """Gets an RSAKey object for the private key file so that we can copy files without logging in with user/password.""" keypath = self.config.server["pkey"] with open(os.path.expanduser(keypath)) as f: pkey = paramiko.RSAKey.from_private_key(f) return pkey
Gets an RSAKey object for the private key file so that we can copy files without logging in with user/password.
def tableexists(tablename): """Test if a table exists.""" result = True try: t = table(tablename, ack=False) except: result = False return result
Test if a table exists.
def map(self, coords): """Map coordinates Parameters ---------- coords : array-like Coordinates to map. Returns ------- coords : ndarray Coordinates. """ for tr in reversed(self.transforms): coords = tr.map(coords) return coords
Map coordinates Parameters ---------- coords : array-like Coordinates to map. Returns ------- coords : ndarray Coordinates.
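A toy chain shows why the transforms are applied in reverse order (simple stand-in classes, not the library's Transform types):

import numpy as np

class Scale:
    def __init__(self, s): self.s = s
    def map(self, coords): return np.asarray(coords) * self.s

class Translate:
    def __init__(self, t): self.t = t
    def map(self, coords): return np.asarray(coords) + self.t

transforms = [Translate(10.0), Scale(2.0)]   # listed outermost-first
coords = np.array([1.0, 2.0])
for tr in reversed(transforms):              # innermost transform runs first
    coords = tr.map(coords)
print(coords)                                # [12. 14.] == (x * 2) + 10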
def _depth(g): """Computes the number of edges on longest path from node to root.""" def _explore(v): if v.depth < 0: v.depth = ((1 + max([-1] + [_explore(annotated_graph[u]) for u in v.parents])) if v.parents else 0) return v.depth annotated_graph = {k: _Node(k, v) for k, v in g.items()} for v in annotated_graph.values(): _explore(v) return annotated_graph
Computes the number of edges on longest path from node to root.
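The same memoized longest-path computation can be written against a plain parent-list mapping; the graph below is made up:

def longest_depth(graph):
    # graph maps node -> list of parent nodes; depth is the longest chain
    # of edges from the node up to a root (a node with no parents).
    memo = {}
    def explore(v):
        if v not in memo:
            memo[v] = (1 + max(explore(p) for p in graph[v])) if graph[v] else 0
        return memo[v]
    return {v: explore(v) for v in graph}

print(longest_depth({'a': [], 'b': ['a'], 'c': ['a', 'b']}))
# {'a': 0, 'b': 1, 'c': 2}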
def _init_mgr(self, mgr, axes=None, dtype=None, copy=False): """ passed a manager and a axes dict """ for a, axe in axes.items(): if axe is not None: mgr = mgr.reindex_axis(axe, axis=self._get_block_manager_axis(a), copy=False) # make a copy if explicitly requested if copy: mgr = mgr.copy() if dtype is not None: # avoid further copies if we can if len(mgr.blocks) > 1 or mgr.blocks[0].values.dtype != dtype: mgr = mgr.astype(dtype=dtype) return mgr
passed a manager and a axes dict
def items(self, founditems=[]): #pylint: disable=dangerous-default-value """Returns a depth-first flat list of *all* items below this element (not limited to AbstractElement)""" l = [] for e in self.data: if e not in founditems: #prevent going in recursive loops l.append(e) if isinstance(e, AbstractElement): l += e.items(l) return l
Returns a depth-first flat list of *all* items below this element (not limited to AbstractElement)
def cursor(self): """ Get a cursor for the current connection. For internal use only. """ cursor = self.mdr.cursor() with self.transaction(): try: yield cursor if cursor.rowcount != -1: self.last_row_count = cursor.rowcount self.last_row_id = getattr(cursor, 'lastrowid', None) except: self.last_row_count = None self.last_row_id = None _safe_close(cursor) raise
Get a cursor for the current connection. For internal use only.
def folder_cls_from_folder_name(cls, folder_name, locale): """Returns the folder class that matches a localized folder name. locale is a string, e.g. 'da_DK' """ for folder_cls in cls.WELLKNOWN_FOLDERS + NON_DELETEABLE_FOLDERS: if folder_name.lower() in folder_cls.localized_names(locale): return folder_cls raise KeyError()
Returns the folder class that matches a localized folder name. locale is a string, e.g. 'da_DK'
def save_xml(self, doc, element): '''Save this target port into an xml.dom.Element object.''' super(TargetPort, self).save_xml(doc, element) element.setAttributeNS(XSI_NS, XSI_NS_S + 'type', 'rtsExt:target_port_ext') element.setAttributeNS(RTS_NS, RTS_NS_S + 'portName', self.port_name)
Save this target port into an xml.dom.Element object.
def get_writer(self): """ Get a writer. This method also makes the output filename be the same as the .track file but with .mpc. (Currently only works on local filesystem) :rtype MPCWriter """ if self._writer is None: suffix = tasks.get_suffix(tasks.TRACK_TASK) try: base_name = re.search("(?P<base_name>.*?)\.\d*{}".format(suffix), self.filename).group('base_name') except: base_name = os.path.splitext(self.filename)[0] mpc_filename_pattern = self.output_context.get_full_path( "{}.?{}".format(base_name, suffix)) mpc_file_count = len(glob(mpc_filename_pattern)) mpc_filename = "{}.{}{}".format(base_name, mpc_file_count, suffix) self._writer = self._create_writer(mpc_filename) return self._writer
Get a writer. This method also makes the output filename be the same as the .track file but with .mpc. (Currently only works on local filesystem) :rtype MPCWriter
def run(self): """ Called by the threading system """ try: self._connect() self._register() while True: try: body = self.command_queue.get(block=True, timeout=1 * SECOND) except queue.Empty: body = None if body is not None: result = self._send(body) if result: self.command_queue.task_done() else: # Something was wrong with the socket. self._disconnect() self._connect() self._register() # Check for stop event after a read from the queue. This is to # allow you to open a socket, immediately send to it, and then # stop it. We do this in the Metadata send at application start # time if self._stop_event.is_set(): logger.debug("CoreAgentSocket thread stopping.") break except Exception: logger.debug("CoreAgentSocket thread exception.") finally: self._started_event.clear() self._stop_event.clear() self._stopped_event.set() logger.debug("CoreAgentSocket thread stopped.")
Called by the threading system
def recursive_unicode(obj): """Walks a simple data structure, converting byte strings to unicode. Supports lists, tuples, and dictionaries. """ if isinstance(obj, dict): return dict((recursive_unicode(k), recursive_unicode(v)) for (k, v) in obj.items()) elif isinstance(obj, list): return list(recursive_unicode(i) for i in obj) elif isinstance(obj, tuple): return tuple(recursive_unicode(i) for i in obj) elif isinstance(obj, bytes_type): return to_unicode(obj) else: return obj
Walks a simple data structure, converting byte strings to unicode. Supports lists, tuples, and dictionaries.
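A self-contained variant that decodes UTF-8 byte strings shows the walk in action (in the original, `to_unicode` and `bytes_type` are Tornado helpers):

def recursive_decode(obj):
    # Walk dicts, lists and tuples, decoding any bytes found along the way.
    if isinstance(obj, dict):
        return {recursive_decode(k): recursive_decode(v) for k, v in obj.items()}
    if isinstance(obj, (list, tuple)):
        return type(obj)(recursive_decode(i) for i in obj)
    if isinstance(obj, bytes):
        return obj.decode('utf-8')
    return obj

print(recursive_decode({b'name': [b'caf\xc3\xa9', 42]}))
# {'name': ['café', 42]}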
def GenerateNewFileName(self): """ Create new file name from show name, season number, episode number and episode name in format ShowName.S<NUM>.E<NUM>.EpisodeName. Returns ---------- string New file name in format ShowName.S<NUM>.E<NUM>.EpisodeName. """ if self.showInfo.showName is not None and self.showInfo.seasonNum is not None and \ self.showInfo.episodeNum is not None and self.showInfo.episodeName is not None: ext = os.path.splitext(self.fileInfo.origPath)[1] newFileName = "{0}.S{1}E{2}".format(self.showInfo.showName, self.showInfo.seasonNum, \ self.showInfo.episodeNum) for episodeNum in self.showInfo.multiPartEpisodeNumbers: newFileName = newFileName + "_{0}".format(episodeNum) newFileName = newFileName + ".{0}{1}".format(self.showInfo.episodeName, ext) newFileName = util.StripSpecialCharacters(newFileName) return newFileName
Create new file name from show name, season number, episode number and episode name in format ShowName.S<NUM>.E<NUM>.EpisodeName. Returns ---------- string New file name in format ShowName.S<NUM>.E<NUM>.EpisodeName.
def capabilities(self, keyword=None):
    """CAPABILITIES command.

    Determines the capabilities of the server.

    Although RFC3977 states that this is a required command for servers to
    implement, not all servers do, so expect that NNTPPermanentError may be
    raised when this command is issued.

    See <http://tools.ietf.org/html/rfc3977#section-5.2>

    Args:
        keyword: Passed directly to the server, however, this is unused by
            the server according to RFC3977.

    Returns:
        A list of capabilities supported by the server. The VERSION
        capability is the first capability in the list.
    """
    args = keyword

    code, message = self.command("CAPABILITIES", args)
    if code != 101:
        raise NNTPReplyError(code, message)

    return [x.strip() for x in self.info_gen(code, message)]
CAPABILITIES command.

Determines the capabilities of the server.

Although RFC3977 states that this is a required command for servers to implement, not all servers do, so expect that NNTPPermanentError may be raised when this command is issued.

See <http://tools.ietf.org/html/rfc3977#section-5.2>

Args:
    keyword: Passed directly to the server, however, this is unused by
        the server according to RFC3977.

Returns:
    A list of capabilities supported by the server. The VERSION
    capability is the first capability in the list.