def get_facet_serializer(self, *args, **kwargs):
    """
    Return the facet serializer instance that should be used for
    serializing faceted output.
    """
    assert "objects" in kwargs, \
        "`objects` is a required argument to `get_facet_serializer()`"

    facet_serializer_class = self.get_facet_serializer_class()

    kwargs["context"] = self.get_serializer_context()
    kwargs["context"].update({
        "objects": kwargs.pop("objects"),
        "facet_query_params_text": self.facet_query_params_text,
    })

    return facet_serializer_class(*args, **kwargs)
def unlink(self, *others):
    """
    Unlink (disassociate) the specified properties objects.
    @param others: The list of objects to unlink.  Unspecified means unlink all.
    @type others: [L{Properties},..]
    @return: self
    @rtype: L{Properties}
    """
    if not len(others):
        others = self.links[:]
    for p in self.links[:]:
        if p in others:
            p.teardown()
    return self
def _element(cls):
    ''' find the element with controls '''
    if not cls.__is_selector():
        raise Exception("Invalid selector[%s]." % cls.__control["by"])
    driver = Web.driver
    try:
        elements = WebDriverWait(driver, cls.__control["timeout"]).until(
            lambda driver: getattr(driver, "find_elements")(
                cls.__control["by"], cls.__control["value"]))
    except Exception:
        raise Exception("Timeout at %d seconds. Element(%s) not found." %
                        (cls.__control["timeout"], cls.__control["by"]))

    if len(elements) < cls.__control["index"] + 1:
        raise Exception("Element [%s]: Element Index Issue! There are [%s] Elements! Index=[%s]" %
                        (cls.__name__, len(elements), cls.__control["index"]))

    if len(elements) > 1:
        print("Element [%s]: There are [%d] elements, chose index=%d" %
              (cls.__name__, len(elements), cls.__control["index"]))

    elm = elements[cls.__control["index"]]
    cls.__control["index"] = 0
    return elm
def _load_lib():
    """Load the library by searching possible paths."""
    lib_path = _find_lib_path()
    lib = ctypes.cdll.LoadLibrary(lib_path[0])
    # DMatrix functions
    lib.MXGetLastError.restype = ctypes.c_char_p
    return lib
def countRandomBitFrequencies(numTerms=100000, percentSparsity=0.01):
    """Create a uniformly random counts matrix through sampling."""
    # Accumulate counts by inplace-adding sparse matrices
    counts = SparseMatrix()
    size = 128 * 128
    counts.resize(1, size)

    # Pre-allocate buffer sparse matrix
    sparseBitmap = SparseMatrix()
    sparseBitmap.resize(1, size)

    random.seed(42)

    # Accumulate counts for each bit for each word
    numWords = 0
    for term in xrange(numTerms):
        bitmap = random.sample(xrange(size), int(size * percentSparsity))
        bitmap.sort()
        sparseBitmap.setRowFromSparse(0, bitmap, [1] * len(bitmap))
        counts += sparseBitmap
        numWords += 1

    # Compute normalized version of counts as a separate matrix
    frequencies = SparseMatrix()
    frequencies.resize(1, size)
    frequencies.copy(counts)
    frequencies.divide(float(numWords))

    # Wrap up by printing some statistics and then saving the normalized version
    printFrequencyStatistics(counts, frequencies, numWords, size)

    frequencyFilename = "bit_frequencies_random.pkl"
    print "Saving frequency matrix in", frequencyFilename
    with open(frequencyFilename, "wb") as frequencyPickleFile:
        pickle.dump(frequencies, frequencyPickleFile)

    return counts
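The same counting idea can be sketched without nupic's SparseMatrix. Below is a minimal numpy (Python 3) analogue; the variable names mirror the function above, and the smaller numTerms is only to keep the demo fast:

import numpy as np

rng = np.random.default_rng(42)
size, percent_sparsity, num_terms = 128 * 128, 0.01, 1000

counts = np.zeros(size)
for _ in range(num_terms):
    # sample a sparse random bitmap and accumulate per-bit counts
    bitmap = rng.choice(size, int(size * percent_sparsity), replace=False)
    counts[bitmap] += 1

frequencies = counts / num_terms
print(frequencies.mean())  # close to percent_sparsity by construction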
def make_wheelfile_inner(base_name, base_dir='.'):
    """Create a whl file from all the files under 'base_dir'.

    Places .dist-info at the end of the archive.
    """
    zip_filename = base_name + ".whl"
    log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)

    # Some applications need reproducible .whl files, but they can't do this
    # without forcing the timestamp of the individual ZipInfo objects. See
    # issue #143.
    timestamp = os.environ.get('SOURCE_DATE_EPOCH')
    if timestamp is None:
        date_time = None
    else:
        date_time = time.gmtime(int(timestamp))[0:6]

    score = {'WHEEL': 1, 'METADATA': 2, 'RECORD': 3}

    def writefile(path, date_time):
        st = os.stat(path)
        if date_time is None:
            mtime = time.gmtime(st.st_mtime)
            date_time = mtime[0:6]
        zinfo = zipfile.ZipInfo(path, date_time)
        zinfo.external_attr = st.st_mode << 16
        zinfo.compress_type = zipfile.ZIP_DEFLATED
        with open(path, 'rb') as fp:
            zip.writestr(zinfo, fp.read())
        log.info("adding '%s'" % path)

    with zipfile.ZipFile(zip_filename, "w", compression=zipfile.ZIP_DEFLATED,
                         allowZip64=True) as zip:
        deferred = []
        for dirpath, dirnames, filenames in os.walk(base_dir):
            # Sort the directory names so that `os.walk` will walk them in a
            # defined order on the next iteration.
            dirnames.sort()
            for name in sorted(filenames):
                path = os.path.normpath(os.path.join(dirpath, name))
                if os.path.isfile(path):
                    if dirpath.endswith('.dist-info'):
                        deferred.append((score.get(name, 0), path))
                    else:
                        writefile(path, date_time)

        deferred.sort()
        for score, path in deferred:
            writefile(path, date_time)

    return zip_filename
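A quick illustration of the SOURCE_DATE_EPOCH handling above: a fixed epoch value yields a fixed ZipInfo date_time, which is what makes the resulting .whl byte-for-byte reproducible. The timestamp value here is hypothetical:

import time

timestamp = '1577836800'                  # hypothetical SOURCE_DATE_EPOCH value
date_time = time.gmtime(int(timestamp))[0:6]
print(date_time)                          # (2020, 1, 1, 0, 0, 0) on every build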
def _input_as_parameter(self, data):
    """ Set the input path and log path based on data (a fasta filepath)
    """
    self.Parameters['-i'].on(data)
    # access data through self.Parameters so we know it's been cast
    # to a FilePath
    input_filepath = self.Parameters['-i'].Value
    input_file_dir, input_filename = split(input_filepath)
    input_file_base, input_file_ext = splitext(input_filename)
    # FIXME: do the same for all other options.
    # formatdb ignores the working directory if no name is passed.
    self.Parameters['-l'].on(FilePath('%s.log' % input_filename))
    self.Parameters['-n'].on(FilePath(input_filename))
    return ''
def check_attr(self, repo_abspath, attrs):
    """
    Generator that returns attributes for given paths relative to repo_abspath.

    >>> g = GitArchiver.check_attr('repo_path', ['export-ignore'])
    >>> next(g)
    >>> attrs = g.send('relative_path')
    >>> print(attrs['export-ignore'])

    @param repo_abspath: Absolute path to a git repository.
    @type repo_abspath: str

    @param attrs: Attributes to check.
    @type attrs: [str]

    @rtype: generator
    """
    def make_process():
        env = dict(environ, GIT_FLUSH='1')
        cmd = 'git check-attr --stdin -z {0}'.format(' '.join(attrs))
        return Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE,
                     cwd=repo_abspath, env=env)

    def read_attrs(process, repo_file_path):
        process.stdin.write(repo_file_path.encode('utf-8') + b'\0')
        process.stdin.flush()

        # For every attribute check-attr will output:
        # <path> NUL <attribute> NUL <info> NUL
        path, attr, info = b'', b'', b''
        nuls_count = 0
        nuls_expected = 3 * len(attrs)

        while nuls_count != nuls_expected:
            b = process.stdout.read(1)

            if b == b'' and process.poll() is not None:
                raise RuntimeError("check-attr exited prematurely")
            elif b == b'\0':
                nuls_count += 1
                if nuls_count % 3 == 0:
                    yield map(self.decode_git_output, (path, attr, info))
                    path, attr, info = b'', b'', b''
            elif nuls_count % 3 == 0:
                path += b
            elif nuls_count % 3 == 1:
                attr += b
            elif nuls_count % 3 == 2:
                info += b

    def read_attrs_old(process, repo_file_path):
        """
        Compatibility with versions 1.8.5 and below that do not recognize -z for output.
        """
        process.stdin.write(repo_file_path.encode('utf-8') + b'\0')
        process.stdin.flush()

        # For every attribute check-attr will output:
        # <path>: <attribute>: <info>\n
        # where <path> is c-quoted
        path, attr, info = b'', b'', b''
        lines_count = 0
        lines_expected = len(attrs)

        while lines_count != lines_expected:
            line = process.stdout.readline()

            info_start = line.rfind(b': ')
            if info_start == -1:
                raise RuntimeError("unexpected output of check-attr: {0}".format(line))

            attr_start = line.rfind(b': ', 0, info_start)
            if attr_start == -1:
                raise RuntimeError("unexpected output of check-attr: {0}".format(line))

            info = line[info_start + 2:len(line) - 1]  # trim leading ": " and trailing \n
            attr = line[attr_start + 2:info_start]     # trim leading ": "
            path = line[:attr_start]

            yield map(self.decode_git_output, (path, attr, info))
            lines_count += 1

    if not attrs:
        return

    process = make_process()

    try:
        while True:
            repo_file_path = yield
            repo_file_attrs = {}

            if self.git_version is None or self.git_version > (1, 8, 5):
                reader = read_attrs
            else:
                reader = read_attrs_old

            for path, attr, value in reader(process, repo_file_path):
                repo_file_attrs[attr] = value

            yield repo_file_attrs
    finally:
        process.stdin.close()
        process.wait()
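The NUL-separated wire format that read_attrs() consumes can be seen on a hand-built byte string (this is not real git output, just the shape the code's comment describes):

raw = b'src/a.txt\0export-ignore\0set\0'
path, attr, info = raw.split(b'\0')[:3]
print(path, attr, info)  # b'src/a.txt' b'export-ignore' b'set'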
def add_signature(name=None, inputs=None, outputs=None):
    """Adds a signature to the module definition.

    NOTE: This must be called within a `module_fn` that is defining a Module.

    Args:
        name: Signature name as a string. If omitted, it is interpreted as
            'default' and is the signature used when `Module.__call__`
            `signature` is not specified.
        inputs: A dict from input name to Tensor or SparseTensor to feed when
            applying the signature. If a single tensor is passed, it is
            interpreted as a dict with a single 'default' entry.
        outputs: A dict from output name to Tensor or SparseTensor to return
            from applying the signature. If a single tensor is passed, it is
            interpreted as a dict with a single 'default' entry.

    Raises:
        ValueError: if the arguments are invalid.
    """
    if not name:
        name = "default"
    if inputs is None:
        inputs = {}
    if outputs is None:
        outputs = {}
    if not isinstance(inputs, dict):
        inputs = {"default": inputs}
    if not isinstance(outputs, dict):
        outputs = {"default": outputs}
    message = find_signature_inputs_from_multivalued_ops(inputs)
    if message:
        logging.error(message)
    message = find_signature_input_colocation_error(name, inputs)
    if message:
        raise ValueError(message)
    saved_model_lib.add_signature(name, inputs, outputs)
def form_valid(self, form):
    """
    save the data
    :param form:
    :return:
    """
    valid = True
    # 'name' is injected in the clean() of the form line 56
    name = form.cleaned_data.get('name').name
    user = self.request.user
    form.save(user=user, service_name=name)

    sa = ServicesActivated.objects.get(name=name)
    if sa.auth_required and sa.self_hosted:
        # trigger the checking of the service
        from django_th.services import default_provider
        default_provider.load_services()
        service_provider = default_provider.get_service(name)
        result = service_provider.check(self.request, user)
        if result is not True:
            # the call to the API failed due to an error, which is in the
            # result string returned by the call
            form.add_error('host', result)
            messages.error(self.request, result)
            return redirect('edit_service', pk=self.kwargs.get(self.pk_url_kwarg))

    if valid:
        messages.success(self.request,
                         _('Service %s modified successfully') % name.split('Service')[1])
    return HttpResponseRedirect(reverse('user_services'))
def reset_weights(self):
    """ Properly initialize the model weights """
    self.input_block.reset_weights()
    self.policy_backbone.reset_weights()
    self.value_backbone.reset_weights()
    self.action_head.reset_weights()
    self.critic_head.reset_weights()
def averagingData(array, windowSize=None, averagingType='median'):
    """Smooth an array with a running median or running mean.

    :param array: sequence of numeric values to smooth
    :param windowSize: size of the sliding window; defaults to
        len(array) / 50, with a minimum of 100
    :param averagingType: "median" or "mean"

    :returns: the smoothed array
    """
    assert averagingType in ['median', 'mean']
    if windowSize is None:
        windowSize = int(len(array) / 50) if int(len(array) / 50) > 100 else 100

    if averagingType == 'median':
        averagedData = runningMedian(array, windowSize)
    elif averagingType == 'mean':
        averagedData = runningMean(array, len(array), windowSize)
    return averagedData
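The helpers runningMedian()/runningMean() are defined elsewhere and not shown in this corpus; a minimal numpy sketch of a running median with the same call shape, assuming windows simply shrink at the array edges:

import numpy as np

def runningMedian(array, windowSize):
    # median over a sliding window that shrinks at the array edges
    half = windowSize // 2
    return np.array([np.median(array[max(0, i - half):i + half + 1])
                     for i in range(len(array))])

print(runningMedian([1, 2, 100, 4, 5], 3))  # the spike at index 2 is damped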
def _task_to_text(self, task):
    """ Return a standard formatting of a Task serialization. """
    started = self._format_date(task.get('started_at', None))
    completed = self._format_date(task.get('completed_at', None))
    success = task.get('success', None)
    success_lu = {None: 'Not executed', True: 'Success', False: 'Failed'}
    run_log = task.get('run_log', {})
    return '\n'.join(['Task: %s' % task.get('name', None),
                      'Command: %s' % task.get('command', None),
                      'Result: %s' % success_lu[success],
                      'Started at: %s' % started,
                      'Completed at: %s' % completed,
                      'Return Code: %s' % run_log.get('return_code', None),
                      'Stdout: %s' % run_log.get('stdout', None),
                      'Stderr: %s' % run_log.get('stderr', None)])
def create_attachment(self, upload_stream, project, wiki_identifier, name, **kwargs):
    """CreateAttachment.

    Creates an attachment in the wiki.
    :param object upload_stream: Stream to upload
    :param str project: Project ID or project name
    :param str wiki_identifier: Wiki Id or name.
    :param str name: Wiki attachment name.
    :rtype: :class:`<WikiAttachmentResponse> <azure.devops.v5_0.wiki.models.WikiAttachmentResponse>`
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if wiki_identifier is not None:
        route_values['wikiIdentifier'] = self._serialize.url('wiki_identifier', wiki_identifier, 'str')
    query_parameters = {}
    if name is not None:
        query_parameters['name'] = self._serialize.query('name', name, 'str')
    if "callback" in kwargs:
        callback = kwargs["callback"]
    else:
        callback = None
    content = self._client.stream_upload(upload_stream, callback=callback)
    response = self._send(http_method='PUT',
                          location_id='c4382d8d-fefc-40e0-92c5-49852e9e17c0',
                          version='5.0',
                          route_values=route_values,
                          query_parameters=query_parameters,
                          content=content,
                          media_type='application/octet-stream')
    response_object = models.WikiAttachmentResponse()
    response_object.attachment = self._deserialize('WikiAttachment', response)
    response_object.eTag = response.headers.get('ETag')
    return response_object
def get_all_tags(self, filters=None, max_records=None, next_token=None):
    """
    Lists the Auto Scaling group tags.

    This action supports pagination by returning a token if there are more
    pages to retrieve. To get the next page, call this action again with the
    returned token as the NextToken parameter.

    :type filters: dict
    :param filters: The value of the filter type used to identify
        the tags to be returned. NOT IMPLEMENTED YET.

    :type max_records: int
    :param max_records: Maximum number of tags to return.

    :rtype: list
    :returns: List of :class:`boto.ec2.autoscale.tag.Tag` instances.
    """
    params = {}
    if max_records:
        params['MaxRecords'] = max_records
    if next_token:
        params['NextToken'] = next_token
    return self.get_list('DescribeTags', params, [('member', Tag)])
def cmd_unzip(zip_file,
              dest,
              excludes=None,
              options=None,
              template=None,
              runas=None,
              trim_output=False,
              password=None):
    '''
    .. versionadded:: 2015.5.0
        In versions 2014.7.x and earlier, this function was known as
        ``archive.unzip``.

    Uses the ``unzip`` command to unpack zip files. This command is part of
    the `Info-ZIP`_ suite of tools, and is typically packaged as simply
    ``unzip``.

    .. _`Info-ZIP`: http://www.info-zip.org/

    zip_file
        Path of zip file to be unpacked

    dest
        The destination directory into which the file should be unpacked

    excludes : None
        Comma-separated list of files not to unpack. Can also be passed in a
        Python list.

    template : None
        Can be set to 'jinja' or another supported template engine to render
        the command arguments before execution:

        .. code-block:: bash

            salt '*' archive.cmd_unzip template=jinja /tmp/zipfile.zip '/tmp/{{grains.id}}' excludes=file_1,file_2

    options
        Optional when using ``zip`` archives, ignored when using other
        archive files. This is mostly used to overwrite existing files with
        ``o``. These options are only used when the ``unzip`` binary is used.

        .. versionadded:: 2016.3.1

    runas : None
        Unpack the zip file as the specified user. Defaults to the user under
        which the minion is running.

        .. versionadded:: 2015.5.0

    trim_output : False
        The number of files we should output on success before the rest are
        trimmed, if this is set to True then it will default to 100

    password
        Password to use with password protected zip files

        .. note::
            This is not considered secure. It is recommended to instead use
            :py:func:`archive.unzip <salt.modules.archive.unzip>` for
            password-protected ZIP files. If a password is used here, then the
            unzip command run to extract the ZIP file will not show up in the
            minion log like most shell commands Salt runs do. However, the
            password will still be present in the events logged to the minion
            log at the ``debug`` log level. If the minion is logging at
            ``debug`` (or more verbose), then be advised that the password
            will appear in the log.

        .. versionadded:: 2016.11.0

    CLI Example:

    .. code-block:: bash

        salt '*' archive.cmd_unzip /tmp/zipfile.zip /home/strongbad/ excludes=file_1,file_2
    '''
    if isinstance(excludes, six.string_types):
        excludes = [x.strip() for x in excludes.split(',')]
    elif isinstance(excludes, (float, six.integer_types)):
        excludes = [six.text_type(excludes)]

    cmd = ['unzip']
    if password:
        cmd.extend(['-P', password])
    if options:
        cmd.extend(shlex.split(options))
    cmd.extend(['{0}'.format(zip_file), '-d', '{0}'.format(dest)])

    if excludes is not None:
        cmd.append('-x')
        cmd.extend(excludes)

    result = __salt__['cmd.run_all'](
        cmd,
        template=template,
        runas=runas,
        python_shell=False,
        redirect_stderr=True,
        output_loglevel='quiet' if password else 'debug')

    if result['retcode'] != 0:
        raise CommandExecutionError(result['stdout'])

    return _trim_files(result['stdout'].splitlines(), trim_output)
def get_ga_tracking_id(self):
    """
    Retrieve tracking ID from settings
    """
    if hasattr(settings, self.ga_tracking_id_settings_key):
        return getattr(settings, self.ga_tracking_id_settings_key)
    return super(GARequestErrorReportingMixin, self).get_ga_tracking_id()
def del_key(self, ref):
    """ Delete a key (ref). Return None, or the LCDd response on error. """
    if ref in self.keys:
        response = self.request("client_del_key %s" % (ref))
        self.keys.remove(ref)

        if "success" in response:
            return None
        else:
            return response
def atlas_peer_dequeue_all(peer_queue=None):
    """
    Get all queued peers
    """
    peers = []
    with AtlasPeerQueueLocked(peer_queue) as pq:
        while len(pq) > 0:
            peers.append(pq.pop(0))

    return peers
def is_underlined(r):
    """
    The function will return True if the r tag passed in is considered
    underlined.
    """
    w_namespace = get_namespace(r, 'w')
    rpr = r.find('%srPr' % w_namespace)
    if rpr is None:
        return False
    underline = rpr.find('%su' % w_namespace)
    return style_is_false(underline)
def find_field_generators(obj):
    """
    Return dictionary with the names and instances of all tohu.BaseGenerator
    occurring in the given object's class & instance namespaces.
    """
    cls_dict = obj.__class__.__dict__
    obj_dict = obj.__dict__
    # debug_print_dict(cls_dict, 'cls_dict')
    # debug_print_dict(obj_dict, 'obj_dict')

    field_gens = {}
    add_field_generators(field_gens, cls_dict)
    add_field_generators(field_gens, obj_dict)

    return field_gens
def copy_opts_for_single_ifo(opt, ifo):
    """
    Takes the namespace object (opt) from the multi-detector interface and
    returns a namespace object for a single ifo that can be used with
    functions expecting output from the single-detector interface.
    """
    opt = copy.deepcopy(opt)
    for arg, val in vars(opt).items():
        if isinstance(val, DictWithDefaultReturn):
            setattr(opt, arg, getattr(opt, arg)[ifo])
    return opt
def Q_weir_rectangular_full_Kindsvater_Carter(h1, h2, b):
    r'''Calculates the flow rate across a full-channel rectangular weir from
    the height of the liquid above the crest of the weir, the liquid depth
    beneath it, and the width of the channel. Model from [1]_ as reproduced
    in [2]_.

    Flow rate is given by:

    .. math::
        Q = \frac{2}{3}\sqrt{2}\left(0.602 + 0.075\frac{h_1}{h_2}\right)
        \left(b - 0.001\right)\sqrt{g}\left(h_1 + 0.001\right)^{1.5}

    Parameters
    ----------
    h1 : float
        Height of the fluid above the crest of the weir [m]
    h2 : float
        Height of the fluid below the crest of the weir [m]
    b : float
        Width of the channel section [m]

    Returns
    -------
    Q : float
        Volumetric flow rate across the weir [m^3/s]

    Notes
    -----
    The following limits apply to the use of this equation:

    h1 > 0.03 m
    b > 0.15 m
    h2 > 0.1 m
    h1/h2 < 2

    Examples
    --------
    >>> Q_weir_rectangular_full_Kindsvater_Carter(h1=0.3, h2=0.4, b=2)
    0.641560300081563

    References
    ----------
    .. [1] Kindsvater, Carl E., and Rolland W. Carter. "Discharge
       Characteristics of Rectangular Thin-Plate Weirs." Journal of the
       Hydraulics Division 83, no. 6 (December 1957): 1-36.
    .. [2] Blevins, Robert D. Applied Fluid Dynamics Handbook. New York,
       N.Y.: Van Nostrand Reinhold Co., 1984.
    '''
    Q = 2/3.*2**0.5*(0.602 + 0.075*h1/h2)*(b - 0.001)*g**0.5*(h1 + 0.001)**1.5
    return Q
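The doctest value can be reproduced by hand; the module-level g the function relies on is assumed here to be scipy.constants.g (standard gravity, 9.80665 m/s^2):

from scipy.constants import g  # assumed source of the module-level g

h1, h2, b = 0.3, 0.4, 2.0
Q = 2/3.*2**0.5*(0.602 + 0.075*h1/h2)*(b - 0.001)*g**0.5*(h1 + 0.001)**1.5
print(Q)  # 0.641560300081563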
def parse_temperature_response(
        temperature_string: str) -> Mapping[str, Optional[float]]:
    '''
    Example input: "T:none C:25"
    '''
    err_msg = 'Unexpected argument to parse_temperature_response: {}'.format(
        temperature_string)
    if not temperature_string or \
            not isinstance(temperature_string, str):
        raise ParseError(err_msg)
    parsed_values = temperature_string.strip().split(' ')
    if len(parsed_values) < 2:
        log.error(err_msg)
        raise ParseError(err_msg)

    data = {
        parse_key_from_substring(s): parse_number_from_substring(s)
        for s in parsed_values[:2]
    }
    if 'C' not in data or 'T' not in data:
        raise ParseError(err_msg)
    data = {
        'current': data['C'],
        'target': data['T']
    }
    return data
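The helpers parse_key_from_substring()/parse_number_from_substring() are defined elsewhere; a minimal sketch consistent with the example input "T:none C:25" (where "none" maps to None) would be:

def parse_key_from_substring(s: str) -> str:
    return s.split(':')[0]

def parse_number_from_substring(s: str):
    value = s.split(':')[1]
    return None if value == 'none' else float(value)

parts = 'T:none C:25'.split(' ')
print({parse_key_from_substring(p): parse_number_from_substring(p) for p in parts})
# {'T': None, 'C': 25.0}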
def array_2d_from_array_1d(self, padded_array_1d):
    """ Map a padded 1D array of values to its original 2D array, trimming
    all edge values.

    Parameters
    ----------
    padded_array_1d : ndarray
        A 1D array of values which were computed using the
        *PaddedRegularGrid*.
    """
    padded_array_2d = self.map_to_2d_keep_padded(padded_array_1d)
    pad_size_0 = self.mask.shape[0] - self.image_shape[0]
    pad_size_1 = self.mask.shape[1] - self.image_shape[1]
    return (padded_array_2d[pad_size_0 // 2:self.mask.shape[0] - pad_size_0 // 2,
                            pad_size_1 // 2:self.mask.shape[1] - pad_size_1 // 2])
def activate(self, span, finish_on_close):
    """
    Make a :class:`~opentracing.Span` instance active.

    :param span: the :class:`~opentracing.Span` that should become active.
    :param finish_on_close: whether *span* should automatically be
        finished when :meth:`Scope.close()` is called.

    If no :func:`tracer_stack_context()` is detected, thread-local
    storage will be used to store the :class:`~opentracing.Scope`.
    Observe that in this case the active :class:`~opentracing.Span`
    will not be automatically propagated to the child coroutines.

    :return: a :class:`~opentracing.Scope` instance to control the end
        of the active period for the :class:`~opentracing.Span`.
        It is a programming error to neglect to call
        :meth:`Scope.close()` on the returned instance.
    """
    context = self._get_context()
    if context is None:
        return super(TornadoScopeManager, self).activate(span, finish_on_close)

    scope = _TornadoScope(self, span, finish_on_close)
    context.active = scope

    return scope
def daysInMonth(date):
    """
    Returns the number of the days in the month for the given date. This
    will take into account leap years based on the inputted date's year.

    :param      date | <datetime.date>

    :return     <int>
    """
    # map from Qt information
    if type(date).__name__ in ('QDate', 'QDateTime', 'QTime'):
        date = date.toPython()

    month = date.month

    # look for a leap year (divisible by 4, except centuries not divisible by 400)
    if month == 2 and date.year % 4 == 0 and \
            (date.year % 100 != 0 or date.year % 400 == 0):
        return 29

    return DaysInMonth.get(month, -1)
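The leap-year rule can be cross-checked against the stdlib calendar module, which applies the full Gregorian rule; this snippet is independent of the function above:

import calendar

for year in (1900, 2000, 2019, 2020):
    print(year, calendar.isleap(year), calendar.monthrange(year, 2)[1])
# 1900 False 28 / 2000 True 29 / 2019 False 28 / 2020 True 29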
def parse(cls, conn):
    """Read a request from the HTTP connection ``conn``.

    May raise ``BadHttpRequestError``.
    """
    req = cls(conn)

    req_line = yield from conn.reader.readline()
    logger('HttpRequest').debug('req_line = %r', req_line)
    req._parse_req_line(req_line)

    header_line = yield from conn.reader.readline()
    while len(header_line) > 0 and header_line != b'\r\n':
        try:
            req._parse_header(header_line)
        except BadHttpHeaderError as e:
            # Tolerating 'minor' mistakes
            logger('HttpRequest').debug(traceback.format_exc())
        header_line = yield from conn.reader.readline()

    return req
def _auto_scroll(self, *args):
    """ Scroll to the end of the text view """
    adj = self['scrollable'].get_vadjustment()
    adj.set_value(adj.get_upper() - adj.get_page_size())
def __get_gp_plan(self, gp):
    """
    Request a search plan for a given gp from the planner and return the
    plan as a graph.
    :param gp:
    :return:
    """
    query = urlencode({'gp': gp})
    response = requests.get('{}/plan?'.format(self.__planner) + query,
                            headers={'Accept': 'text/turtle'})
    graph = Graph()
    try:
        graph.parse(source=StringIO.StringIO(response.text), format='turtle')
    except BadSyntax:
        pass
    return graph
def cd(path_to):  # pylint: disable=invalid-name
    """cd to the given path

    If the path is a file, then cd to its parent directory

    Remember the current directory before the cd
        so that we can cd back there with cd('-')
    """
    if path_to == '-':
        if not cd.previous:
            raise PathError('No previous directory to return to')
        return cd(cd.previous)
    if not hasattr(path_to, 'cd'):
        path_to = makepath(path_to)
    try:
        previous = os.getcwd()
    except OSError as e:
        if 'No such file or directory' in str(e):
            return False
        raise
    if path_to.isdir():
        os.chdir(path_to)
    elif path_to.isfile():
        os.chdir(path_to.parent)
    elif not os.path.exists(path_to):
        return False
    else:
        raise PathError('Cannot cd to %s' % path_to)
    cd.previous = previous
    return True
def circle_touching_line(center, radius, start, end):
    """ Return true if the given circle intersects the given segment. Note
    that this checks for intersection with a line segment, and not an actual
    line.

    :param center: Center of the circle.
    :type center: Vector
    :param radius: Radius of the circle.
    :type radius: float
    :param start: The first end of the line segment.
    :type start: Vector
    :param end: The second end of the line segment.
    :type end: Vector
    """
    C, R = center, radius
    A, B = start, end

    a = (B.x - A.x)**2 + (B.y - A.y)**2
    b = 2 * (B.x - A.x) * (A.x - C.x) \
      + 2 * (B.y - A.y) * (A.y - C.y)
    c = C.x**2 + C.y**2 + A.x**2 + A.y**2 \
      - 2 * (C.x * A.x + C.y * A.y) - R**2

    discriminant = b**2 - 4 * a * c
    if discriminant < 0:
        return False
    elif discriminant == 0:
        u = v = -b / float(2 * a)
    else:
        u = (-b + math.sqrt(discriminant)) / float(2 * a)
        v = (-b - math.sqrt(discriminant)) / float(2 * a)

    if u < 0 and v < 0:
        return False
    if u > 1 and v > 1:
        return False

    return True
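A quick sanity check, assuming circle_touching_line() is importable; a namedtuple stands in for the Vector type (anything with .x and .y works):

from collections import namedtuple

Vector = namedtuple('Vector', 'x y')
print(circle_touching_line(Vector(0, 0), 1.0, Vector(-2, 0), Vector(2, 0)))  # True
print(circle_touching_line(Vector(0, 5), 1.0, Vector(-2, 0), Vector(2, 0)))  # False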
def metadata(request):
    """
    Returns an XML with the SAML 2.0 metadata for this IdP.
    The metadata is constructed on-the-fly based on the config dict in the
    django settings.
    """
    conf = IdPConfig()
    conf.load(copy.deepcopy(settings.SAML_IDP_CONFIG))
    metadata = entity_descriptor(conf)
    return HttpResponse(content=text_type(metadata).encode('utf-8'),
                        content_type="text/xml; charset=utf8")
def event(
    title,
    text,
    alert_type=None,
    aggregation_key=None,
    source_type_name=None,
    date_happened=None,
    priority=None,
    tags=None,
    hostname=None,
):
    """
    Send an event.
    """
def get_category_metrics(self, category):
    """Get metrics belonging to the given category"""
    slug_list = self._category_slugs(category)
    return self.get_metrics(slug_list)
def cancel_subscription(self, sid):
    """
    Unsubscribes from a previously configured subscription.
    """
    url = urljoin(self._url_base, self._event_sub_url)
    headers = dict(
        HOST=urlparse(url).netloc,
        SID=sid
    )
    resp = requests.request('UNSUBSCRIBE', url, headers=headers,
                            auth=self.device.http_auth)
    resp.raise_for_status()
def run_kmeans(self, X, K):
    """Runs k-means and returns the means and the labels assigned to the data."""
    wX = vq.whiten(X)
    means, dist = vq.kmeans(wX, K, iter=100)
    labels, dist = vq.vq(wX, means)
    return means, labels
def visit_For(self, node):
    '''
    For loop creates aliasing between the target
    and the content of the iterator

    >>> from pythran import passmanager
    >>> pm = passmanager.PassManager('demo')
    >>> module = ast.parse("""
    ... def foo(a):
    ...     for i in a:
    ...         {i}""")
    >>> result = pm.gather(Aliases, module)
    >>> Aliases.dump(result, filter=ast.Set)
    {i} => ['|i|']

    Not very useful, unless we know something about the iterated container

    >>> module = ast.parse("""
    ... def foo(a, b):
    ...     for i in [a, b]:
    ...         {i}""")
    >>> result = pm.gather(Aliases, module)
    >>> Aliases.dump(result, filter=ast.Set)
    {i} => ['|a|', '|b|']
    '''
    iter_aliases = self.visit(node.iter)
    if all(isinstance(x, ContainerOf) for x in iter_aliases):
        target_aliases = set()
        for iter_alias in iter_aliases:
            target_aliases.add(iter_alias.containee)
    else:
        target_aliases = {node.target}

    self.add(node.target, target_aliases)
    self.aliases[node.target.id] = self.result[node.target]

    # the body is visited twice, presumably so that alias information
    # computed in the first pass can propagate across loop iterations
    self.generic_visit(node)
    self.generic_visit(node)
def _fill(self, values):
    """Add extra values to fill the line"""
    if not self._previous_line:
        self._previous_line = values
        return super(StackedLine, self)._fill(values)
    new_values = values + list(reversed(self._previous_line))
    self._previous_line = values
    return new_values
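What _fill() builds is the current series followed by the previous series reversed, i.e. the outline of the band between two stacked lines. A plain-list illustration:

previous_line = [1, 2, 3]
values = [2, 4, 6]
print(values + list(reversed(previous_line)))  # [2, 4, 6, 3, 2, 1]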
def processFormData(self, data, dataset_name):
    """Take a string of form data as CSV and convert to insert statements,
    return template and data values"""
    # Get the cols for this dataset
    cols = self.datasets[dataset_name]
    reader = self.getCSVReader(data, reader_type=csv.reader)
    # Get fieldnames from the first line of the reader; what is left is the
    # set of data rows
    fieldnames = next(reader)
    # Check columns
    for col in cols:
        varname = col["varname"]
        if varname not in fieldnames:
            raise ValueError("Column %s not found in data for dataset %s" %
                             (varname, dataset_name,))
    # Now call overridden methods of base classes to process this data
    self._processDML(dataset_name, cols, reader)
def parse_headers(self, headers):
    """Parses a semi-colon delimited list of headers.

    Example: foo=bar;baz=qux
    """
    for name, value in _parse_keyvalue_list(headers):
        self.headers[name] = value
def irfs(self, **kwargs):
    """ Get the name of the IRFs associated with a particular dataset """
    dsval = kwargs.get('dataset', self.dataset(**kwargs))
    tokens = dsval.split('_')
    irf_name = "%s_%s_%s" % (DATASET_DICTIONARY['%s_%s' % (tokens[0], tokens[1])],
                             EVCLASS_NAME_DICTIONARY[tokens[3]],
                             kwargs.get('irf_ver'))
    return irf_name
def sequence_names(fasta):
    """ return a list of the sequence IDs in a FASTA file """
    sequences = SeqIO.parse(fasta, "fasta")
    records = [record.id for record in sequences]
    return records
def add(self, type, orig, replace):
    """Add an entry in the catalog, it may overwrite existing but
       different entries. """
    ret = libxml2mod.xmlACatalogAdd(self._o, type, orig, replace)
    return ret
def action(self, action):
    r"""Activate or deactivate an output.

    Use the <wait> option to activate/deactivate the port for a
    limited period of time.

    <Port ID> = Port name. Default: Name from Output.Name
    <a> = Action character. /=active, \=inactive
    <wait> = Delay before the next action. Unit: milliseconds

    Note: The :, / and \ characters must be percent-encoded in the URI.
    See Percent encoding.

    Example: To set output 1 to active, use 1:/.
    In the URI, the action argument becomes action=1%3A%2F
    """
    if not self.direction == DIRECTION_OUT:
        return

    port_action = quote(
        '{port}:{action}'.format(port=int(self.id) + 1, action=action),
        safe=''
    )
    url = URL + ACTION.format(action=port_action)
    self._request('get', url)
def send(self, message):
    """ Send a message object

        :type message: data.OutgoingMessage
        :param message: The message to send
        :rtype: data.OutgoingMessage
        :returns: The sent message with populated fields

        :raises AssertionError: wrong provider name encountered (returned by
            the router, or provided to OutgoingMessage)
        :raises MessageSendError: generic errors
        :raises AuthError: provider authentication failed
        :raises LimitsError: sending limits exceeded
        :raises CreditError: not enough money on the account
    """
    # Which provider to use?
    provider_name = self._default_provider  # default
    if message.provider is not None:
        assert message.provider in self._providers, \
            'Unknown provider specified in OutgoingMessage.provider: {}'.format(provider_name)
        provider = self.get_provider(message.provider)
    else:
        # Apply routing
        if message.routing_values is not None:  # Routing values are present
            provider_name = self.router(message, *message.routing_values) \
                or self._default_provider
            assert provider_name in self._providers, \
                'Routing function returned an unknown provider name: {}'.format(provider_name)
        # Use the default provider when no routing values are given
        provider = self.get_provider(provider_name)

    # Set message provider name
    message.provider = provider.name

    # Send the message using the provider
    message = provider.send(message)

    # Emit the send event
    self.onSend(message)

    # Finish
    return message
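A hedged sketch of the routing contract send() relies on: the router receives the message plus its positional routing_values and returns a provider name, or a falsy value to fall back to the default provider. Provider names below are hypothetical:

def route_by_country(message, country=None, priority=None):
    # routing_values are positional, so their order is part of the contract
    if country == 'US':
        return 'twilio-us'      # hypothetical provider name
    if priority == 'high':
        return 'premium-gw'     # hypothetical provider name
    return None                 # fall back to the default provider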
def main():
    """Sample usage for this python module

    This main method simply illustrates sample usage for this python module.

    :return: None
    """
    log = logging.getLogger(Logify.get_name() + '.logify.main')
    log.info('logger name is: %s', Logify.get_name())
    log.debug('This is DEBUG')
    log.info('This is INFO')
    log.warning('This is a WARNING')
    log.error('This is an ERROR')
def login_required(f, redirect_field_name=REDIRECT_FIELD_NAME, login_url=None):
    """
    Decorator that wraps django.contrib.auth.decorators.login_required, but
    supports extracting Shopify's authentication query parameters (`shop`,
    `timestamp`, `signature` and `hmac`) and passing them on to the login URL
    (instead of just wrapping them up and encoding them in to the `next`
    parameter).

    This is useful for ensuring that users are automatically logged on when
    they first access a page through the Shopify Admin, which passes these
    parameters with every page request to an embedded app.
    """
    @wraps(f)
    def wrapper(request, *args, **kwargs):
        if is_authenticated(request.user):
            return f(request, *args, **kwargs)

        # Extract the Shopify-specific authentication parameters from the
        # current request.
        shopify_params = {
            k: request.GET[k]
            for k in ['shop', 'timestamp', 'signature', 'hmac']
            if k in request.GET
        }

        # Get the login URL.
        resolved_login_url = force_str(resolve_url(login_url or settings.LOGIN_URL))

        # Add the Shopify authentication parameters to the login URL.
        updated_login_url = add_query_parameters_to_url(resolved_login_url,
                                                        shopify_params)

        django_login_required_decorator = django_login_required(
            redirect_field_name=redirect_field_name, login_url=updated_login_url)
        return django_login_required_decorator(f)(request, *args, **kwargs)

    return wrapper
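add_query_parameters_to_url() is a helper defined elsewhere; a minimal stdlib sketch of the behaviour the decorator needs (merge new parameters into a URL while keeping any existing query string) might look like:

from urllib.parse import parse_qsl, urlencode, urlsplit, urlunsplit

def add_query_parameters_to_url(url, params):
    scheme, netloc, path, query, fragment = urlsplit(url)
    merged = dict(parse_qsl(query), **params)
    return urlunsplit((scheme, netloc, path, urlencode(merged), fragment))

print(add_query_parameters_to_url('/login/?next=/', {'shop': 'x.myshopify.com'}))
# /login/?next=%2F&shop=x.myshopify.com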
def _is_entity(bpe):
    """Return True if the element is a physical entity."""
    if isinstance(bpe, _bp('Protein')) or \
       isinstance(bpe, _bpimpl('Protein')) or \
       isinstance(bpe, _bp('SmallMolecule')) or \
       isinstance(bpe, _bpimpl('SmallMolecule')) or \
       isinstance(bpe, _bp('Complex')) or \
       isinstance(bpe, _bpimpl('Complex')) or \
       isinstance(bpe, _bp('Rna')) or \
       isinstance(bpe, _bpimpl('Rna')) or \
       isinstance(bpe, _bp('RnaRegion')) or \
       isinstance(bpe, _bpimpl('RnaRegion')) or \
       isinstance(bpe, _bp('DnaRegion')) or \
       isinstance(bpe, _bpimpl('DnaRegion')) or \
       isinstance(bpe, _bp('PhysicalEntity')) or \
       isinstance(bpe, _bpimpl('PhysicalEntity')):
        return True
    else:
        return False
def if_url(context, url_name, yes, no):
    """
    Example: %li{ class:"{% if_url 'contacts.contact_read' 'active' '' %}" }
    """
    current = context["request"].resolver_match.url_name
    return yes if url_name == current else no
def to_grid_locator(latitude, longitude, precision='square'):
    """Calculate Maidenhead locator from latitude and longitude.

    Args:
        latitude (float): Position's latitude
        longitude (float): Position's longitude
        precision (str): Precision with which generate locator string

    Returns:
        str: Maidenhead locator for latitude and longitude

    Raises:
        ValueError: Invalid precision identifier
        ValueError: Invalid latitude or longitude value
    """
    if precision not in ('square', 'subsquare', 'extsquare'):
        raise ValueError('Unsupported precision value %r' % precision)

    if not -90 <= latitude <= 90:
        raise ValueError('Invalid latitude value %r' % latitude)
    if not -180 <= longitude <= 180:
        raise ValueError('Invalid longitude value %r' % longitude)

    latitude += 90.0
    longitude += 180.0

    locator = []

    field = int(longitude / LONGITUDE_FIELD)
    locator.append(chr(field + 65))
    longitude -= field * LONGITUDE_FIELD

    field = int(latitude / LATITUDE_FIELD)
    locator.append(chr(field + 65))
    latitude -= field * LATITUDE_FIELD

    square = int(longitude / LONGITUDE_SQUARE)
    locator.append(str(square))
    longitude -= square * LONGITUDE_SQUARE

    square = int(latitude / LATITUDE_SQUARE)
    locator.append(str(square))
    latitude -= square * LATITUDE_SQUARE

    if precision in ('subsquare', 'extsquare'):
        subsquare = int(longitude / LONGITUDE_SUBSQUARE)
        locator.append(chr(subsquare + 97))
        longitude -= subsquare * LONGITUDE_SUBSQUARE

        subsquare = int(latitude / LATITUDE_SUBSQUARE)
        locator.append(chr(subsquare + 97))
        latitude -= subsquare * LATITUDE_SUBSQUARE

    if precision == 'extsquare':
        extsquare = int(longitude / LONGITUDE_EXTSQUARE)
        locator.append(str(extsquare))

        extsquare = int(latitude / LATITUDE_EXTSQUARE)
        locator.append(str(extsquare))

    return ''.join(locator)
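The LONGITUDE_*/LATITUDE_* constants are module-level and not shown here; the values below are the standard Maidenhead cell sizes they presumably hold, followed by a quick field-letter check (Munich is conventionally in field JN):

LONGITUDE_FIELD, LATITUDE_FIELD = 20.0, 10.0      # fields A-R
LONGITUDE_SQUARE, LATITUDE_SQUARE = 2.0, 1.0      # squares 0-9
LONGITUDE_SUBSQUARE = LONGITUDE_SQUARE / 24       # subsquares a-x
LATITUDE_SUBSQUARE = LATITUDE_SQUARE / 24
LONGITUDE_EXTSQUARE = LONGITUDE_SUBSQUARE / 10    # extended squares 0-9
LATITUDE_EXTSQUARE = LATITUDE_SUBSQUARE / 10

# Field letters for Munich (48.147 N, 11.608 E)
lon, lat = 11.608 + 180.0, 48.147 + 90.0
print(chr(int(lon / LONGITUDE_FIELD) + 65) + chr(int(lat / LATITUDE_FIELD) + 65))  # JN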
def status_schedule(token):
    """
    Returns the json string from the Hydrawise server after calling
    statusschedule.php.

    :param token: The users API token.
    :type token: string
    :returns: The response from the controller. If there was an error
        returns None.
    :rtype: string or None
    """
    url = 'https://app.hydrawise.com/api/v1/statusschedule.php'
    payload = {
        'api_key': token,
        'hours': 168}
    get_response = requests.get(url, params=payload, timeout=REQUESTS_TIMEOUT)
    if get_response.status_code == 200 and \
            'error_msg' not in get_response.json():
        return get_response.json()
    return None
def reassemble(cls, fields, document):
    """
    Take a previously assembled document and reassemble the given set of
    fields for it in place.
    """
    for field_name in cls._instructions:
        if field_name in fields:
            maker = cls._instructions[field_name]
            with maker.target(document):
                document[field_name] = maker()
def cifar_generator(cifar_version, tmp_dir, training, how_many, start_from=0):
    """Image generator for CIFAR-10 and 100.

    Args:
        cifar_version: string; one of "cifar10" or "cifar100"
        tmp_dir: path to temporary storage directory.
        training: a Boolean; if true, we use the train set, otherwise the
            test set.
        how_many: how many images and labels to generate.
        start_from: from which image to start.

    Returns:
        An instance of image_generator that produces CIFAR-10 images and
        labels.
    """
    if cifar_version == "cifar10":
        url = _CIFAR10_URL
        train_files = _CIFAR10_TRAIN_FILES
        test_files = _CIFAR10_TEST_FILES
        prefix = _CIFAR10_PREFIX
        image_size = _CIFAR10_IMAGE_SIZE
        label_key = "labels"
    elif cifar_version == "cifar100" or cifar_version == "cifar20":
        url = _CIFAR100_URL
        train_files = _CIFAR100_TRAIN_FILES
        test_files = _CIFAR100_TEST_FILES
        prefix = _CIFAR100_PREFIX
        image_size = _CIFAR100_IMAGE_SIZE
        if cifar_version == "cifar100":
            label_key = "fine_labels"
        else:
            label_key = "coarse_labels"

    _get_cifar(tmp_dir, url)
    data_files = train_files if training else test_files
    all_images, all_labels = [], []
    for filename in data_files:
        path = os.path.join(tmp_dir, prefix, filename)
        with tf.gfile.Open(path, "rb") as f:
            if six.PY2:
                data = cPickle.load(f)
            else:
                data = cPickle.load(f, encoding="latin1")
        images = data["data"]
        num_images = images.shape[0]
        images = images.reshape((num_images, 3, image_size, image_size))
        all_images.extend([
            np.squeeze(images[j]).transpose((1, 2, 0)) for j in range(num_images)
        ])
        labels = data[label_key]
        all_labels.extend([labels[j] for j in range(num_images)])
    return image_utils.image_generator(
        all_images[start_from:start_from + how_many],
        all_labels[start_from:start_from + how_many])
def add_caveat(self, cav, key=None, loc=None):
    '''Add a caveat to the macaroon.

    It encrypts it using the given key pair and by looking up the location
    using the given locator.

    As a special case, if the caveat's Location field has the prefix
    "local " the caveat is added as a client self-discharge caveat using
    the public key base64-encoded in the rest of the location. In this
    case, the Condition field must be empty. The resulting third-party
    caveat will encode the condition "true" encrypted with that public
    key.

    @param cav the checkers.Caveat to be added.
    @param key the public key to encrypt third party caveat.
    @param loc locator to find information on third parties when adding
    third party caveats. It is expected to have a third_party_info method
    that will be called with a location string and should return a
    ThirdPartyInfo instance holding the requested information.
    '''
    if cav.location is None:
        self._macaroon.add_first_party_caveat(
            self.namespace.resolve_caveat(cav).condition)
        return
    if key is None:
        raise ValueError(
            'no private key to encrypt third party caveat')
    local_info = _parse_local_location(cav.location)
    if local_info is not None:
        info = local_info
        if cav.condition != '':
            raise ValueError(
                'cannot specify caveat condition in '
                'local third-party caveat')
        cav = checkers.Caveat(location='local', condition='true')
    else:
        if loc is None:
            raise ValueError(
                'no locator when adding third party caveat')
        info = loc.third_party_info(cav.location)

    root_key = os.urandom(24)

    # Use the least supported version to encode the caveat.
    if self._version < info.version:
        info = ThirdPartyInfo(
            version=self._version,
            public_key=info.public_key,
        )

    caveat_info = encode_caveat(
        cav.condition, root_key, info, key, self._namespace)
    if info.version < VERSION_3:
        # We're encoding for an earlier client or third party which does
        # not understand bundled caveat info, so use the encoded
        # caveat information as the caveat id.
        id = caveat_info
    else:
        id = self._new_caveat_id(self._caveat_id_prefix)
        self._caveat_data[id] = caveat_info

    self._macaroon.add_third_party_caveat(cav.location, root_key, id)
def get_config_filename(args):
    '''Get the file name of the config file.'''
    experiment_id = check_experiment_id(args)
    if experiment_id is None:
        print_error('Please set the experiment id!')
        exit(1)
    experiment_config = Experiments()
    experiment_dict = experiment_config.get_all_experiments()
    return experiment_dict[experiment_id]['fileName']
def dad_status_output_dad_last_state(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    dad_status = ET.Element("dad_status")
    config = dad_status
    output = ET.SubElement(dad_status, "output")
    dad_last_state = ET.SubElement(output, "dad-last-state")
    dad_last_state.text = kwargs.pop('dad_last_state')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def flatten(list_of_lists):
    """Flatten a list of lists but maintain strings and ints as entries."""
    flat_list = []
    for sublist in list_of_lists:
        if isinstance(sublist, string_types) or isinstance(sublist, int):
            flat_list.append(sublist)
        elif sublist is None:
            continue
        elif not isinstance(sublist, string_types) and len(sublist) == 1:
            flat_list.append(sublist[0])
        else:
            flat_list.append(tuple(sublist))
    return flat_list
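Usage sketch, assuming flatten() is in scope (its only external dependency is six.string_types): single-element sublists are unwrapped, longer ones become tuples, and None entries are dropped.

print(flatten([['a'], None, 'b', [1, 2], 3, ['x', 'y']]))
# ['a', 'b', (1, 2), 3, ('x', 'y')]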
def add_gate_option_group(parser):
    """Adds the options needed to apply gates to data.

    Parameters
    ----------
    parser : object
        ArgumentParser instance.
    """
    gate_group = parser.add_argument_group("Options for gating data.")

    gate_group.add_argument("--gate", nargs="+", type=str,
                            metavar="IFO:CENTRALTIME:HALFDUR:TAPERDUR",
                            help="Apply one or more gates to the data before "
                                 "filtering.")
    gate_group.add_argument("--gate-overwhitened", action="store_true",
                            help="Overwhiten data first, then apply the "
                                 "gates specified in --gate. Overwhitening "
                                 "allows for sharper tapers to be used, "
                                 "since lines are not blurred.")
    gate_group.add_argument("--psd-gate", nargs="+", type=str,
                            metavar="IFO:CENTRALTIME:HALFDUR:TAPERDUR",
                            help="Apply one or more gates to the data used "
                                 "for computing the PSD. Gates are applied "
                                 "prior to FFT-ing the data for PSD "
                                 "estimation.")
    return gate_group
def validateRequest(self, uri, postVars, expectedSignature):
    """validate a request from plivo

    uri: the full URI that Plivo requested on your server
    postVars: post vars that Plivo sent with the request
    expectedSignature: signature in HTTP X-Plivo-Signature header

    returns true if the request passes validation, false if not
    """
    # append the POST variables sorted by key to the uri
    s = uri
    for k, v in sorted(postVars.items()):
        s += k + v

    # compute signature and compare signatures
    return (base64.encodestring(hmac.new(self.auth_token, s, sha1).digest()).
            strip() == expectedSignature)
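The function above is Python 2 (base64.encodestring, implicit str/bytes mixing). A Python 3 sketch of the same signature scheme, with hypothetical credential and parameter values:

import base64
import hmac
from hashlib import sha1

auth_token = b'my-auth-token'                     # hypothetical credentials
uri = 'https://example.com/answer/'
post_vars = {'To': '15551234567', 'From': '15559876543'}

# concatenate the URI with the POST variables sorted by key, then HMAC-SHA1
s = uri + ''.join(k + v for k, v in sorted(post_vars.items()))
signature = base64.b64encode(
    hmac.new(auth_token, s.encode('utf-8'), sha1).digest()).decode('ascii')
print(signature)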
def to_tf_matrix(expression_matrix, gene_names, tf_names):
    """
    :param expression_matrix: numpy matrix. Rows are observations and
        columns are genes.
    :param gene_names: a list of gene names. Each entry corresponds to the
        expression_matrix column with same index.
    :param tf_names: a list of transcription factor names. Should be a
        subset of gene_names.
    :return: tuple of:
        0: A numpy matrix representing the predictor matrix for the
           regressions.
        1: The gene names corresponding to the columns in the predictor
           matrix.
    """
    tuples = [(index, gene)
              for index, gene in enumerate(gene_names) if gene in tf_names]

    tf_indices = [t[0] for t in tuples]
    tf_matrix_names = [t[1] for t in tuples]

    return expression_matrix[:, tf_indices], tf_matrix_names
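Worked example on a tiny expression matrix, assuming to_tf_matrix() is in scope; gene and TF names are made up:

import numpy as np

expression = np.arange(12).reshape(3, 4)          # 3 observations x 4 genes
genes = ['g1', 'tf_a', 'g2', 'tf_b']
tf_matrix, tf_matrix_names = to_tf_matrix(expression, genes, ['tf_a', 'tf_b'])
print(tf_matrix_names)  # ['tf_a', 'tf_b']
print(tf_matrix.shape)  # (3, 2) -- columns 1 and 3 of `expression`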
:param expression_matrix: numpy matrix. Rows are observations and columns are genes. :param gene_names: a list of gene names. Each entry corresponds to the expression_matrix column with same index. :param tf_names: a list of transcription factor names. Should be a subset of gene_names. :return: tuple of: 0: A numpy matrix representing the predictor matrix for the regressions. 1: The gene names corresponding to the columns in the predictor matrix.
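A toy run with hypothetical gene names, showing that only the TF columns survive and that output order follows gene_names, not tf_names:

import numpy as np

expr = np.arange(12).reshape(3, 4)   # 3 observations x 4 genes
genes = ['g1', 'g2', 'tf1', 'tf2']
tfs = ['tf2', 'tf1']

tf_matrix, tf_names_out = to_tf_matrix(expr, genes, tfs)
print(tf_matrix.shape)  # (3, 2) -- columns for tf1 and tf2 only
print(tf_names_out)     # ['tf1', 'tf2'] -- kept in gene_names order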
def _get_co_name(self, context): """ Obtain the CO name previously saved in the request state, or if not set use the request path obtained from the current context to determine the target CO. :type context: The current context :rtype: string :param context: The current context :return: CO name """ try: co_name = context.state[self.name][self.KEY_CO_NAME] logger.debug("Found CO {} from state".format(co_name)) except KeyError: co_name = self._get_co_name_from_path(context) logger.debug("Found CO {} from request path".format(co_name)) return co_name
Obtain the CO name previously saved in the request state, or if not set use the request path obtained from the current context to determine the target CO. :type context: The current context :rtype: string :param context: The current context :return: CO name
def make_parser_with_config_adder(parser, config): """factory function for a smarter parser: returns a utility function that pulls defaults from the config as well. Pull the default for the parser not only from the ``default`` kwarg, but also from ``config`` if an identical key is found there once the leading ``--`` or ``--no`` is removed. If the option is a boolean flag, automatically register an opposite, exclusive option by prepending or removing the ``--no-``. This is useful to overwrite config in ``.travis.yml``. Mutates the config object and removes known keys in order to detect unused options afterward. """ def internal(arg, **kwargs): invert = { 'store_true':'store_false', 'store_false':'store_true', } if arg.startswith('--no-'): key = arg[5:] else: key = arg[2:] if 'default' in kwargs: if key in config: kwargs['default'] = config[key] del config[key] action = kwargs.get('action') if action in invert: exclusive_grp = parser.add_mutually_exclusive_group() exclusive_grp.add_argument(arg, **kwargs) kwargs['action'] = invert[action] kwargs['help'] = 'Inverse of "%s"' % arg if arg.startswith('--no-'): arg = '--%s' % arg[5:] else: arg = '--no-%s' % arg[2:] exclusive_grp.add_argument(arg, **kwargs) else: parser.add_argument(arg, **kwargs) return internal
factory function for a smarter parser: returns a utility function that pulls defaults from the config as well. Pull the default for the parser not only from the ``default`` kwarg, but also from ``config`` if an identical key is found there once the leading ``--`` or ``--no`` is removed. If the option is a boolean flag, automatically register an opposite, exclusive option by prepending or removing the ``--no-``. This is useful to overwrite config in ``.travis.yml``. Mutates the config object and removes known keys in order to detect unused options afterward.
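A hypothetical usage sketch: defaults are pulled from the config dict, known keys are consumed out of it, and boolean flags get an automatic --no- inverse.

import argparse

config = {'verbose': True, 'jobs': 4}
parser = argparse.ArgumentParser()
add = make_parser_with_config_adder(parser, config)
add('--verbose', action='store_true', default=False, help='Chatty output')
add('--jobs', type=int, default=1, help='Parallel jobs')

args = parser.parse_args([])
print(args.verbose, args.jobs)  # True 4 -- defaults overridden by config
print(config)                   # {} -- known keys were consumed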
def draw(self, scale=0.7, filename=None, style=None, output='text', interactive=False, line_length=None, plot_barriers=True, reverse_bits=False, justify=None): """Draw the quantum circuit Using the output parameter you can specify the format. The choices are: 0. text: ASCII art string 1. latex: high-quality images, but heavy external software dependencies 2. matplotlib: purely in Python with no external dependencies Defaults to an overcomplete basis, in order to not alter gates. Args: scale (float): scale of image to draw (shrink if < 1) filename (str): file path to save image to style (dict or str): dictionary of style or file name of style file. You can refer to the :ref:`Style Dict Doc <style-dict-doc>` for more information on the contents. output (str): Select the output method to use for drawing the circuit. Valid choices are `text`, `latex`, `latex_source`, `mpl`. interactive (bool): when set true show the circuit in a new window (for `mpl` this depends on the matplotlib backend being used supporting this). Note when used with either the `text` or the `latex_source` output type this has no effect and will be silently ignored. line_length (int): sets the length of the lines generated by `text` reverse_bits (bool): When set to True reverse the bit order inside registers for the output visualization. plot_barriers (bool): Enable/disable drawing barriers in the output circuit. Defaults to True. justify (string): Options are `left`, `right` or `none`, if anything else is supplied it defaults to left justified. It refers to where gates should be placed in the output circuit if there is an option. `none` results in each gate being placed in its own column. Currently only supported by text drawer. Returns: PIL.Image or matplotlib.figure or str or TextDrawing: * PIL.Image: (output `latex`) an in-memory representation of the image of the circuit diagram. * matplotlib.figure: (output `mpl`) a matplotlib figure object for the circuit diagram. * str: (output `latex_source`). The LaTeX source code. * TextDrawing: (output `text`). A drawing that can be printed as ascii art Raises: VisualizationError: when an invalid output method is selected """ from qiskit.tools import visualization return visualization.circuit_drawer(self, scale=scale, filename=filename, style=style, output=output, interactive=interactive, line_length=line_length, plot_barriers=plot_barriers, reverse_bits=reverse_bits, justify=justify)
Draw the quantum circuit Using the output parameter you can specify the format. The choices are: 0. text: ASCII art string 1. latex: high-quality images, but heavy external software dependencies 2. matplotlib: purely in Python with no external dependencies Defaults to an overcomplete basis, in order to not alter gates. Args: scale (float): scale of image to draw (shrink if < 1) filename (str): file path to save image to style (dict or str): dictionary of style or file name of style file. You can refer to the :ref:`Style Dict Doc <style-dict-doc>` for more information on the contents. output (str): Select the output method to use for drawing the circuit. Valid choices are `text`, `latex`, `latex_source`, `mpl`. interactive (bool): when set true show the circuit in a new window (for `mpl` this depends on the matplotlib backend being used supporting this). Note when used with either the `text` or the `latex_source` output type this has no effect and will be silently ignored. line_length (int): sets the length of the lines generated by `text` reverse_bits (bool): When set to True reverse the bit order inside registers for the output visualization. plot_barriers (bool): Enable/disable drawing barriers in the output circuit. Defaults to True. justify (string): Options are `left`, `right` or `none`, if anything else is supplied it defaults to left justified. It refers to where gates should be placed in the output circuit if there is an option. `none` results in each gate being placed in its own column. Currently only supported by text drawer. Returns: PIL.Image or matplotlib.figure or str or TextDrawing: * PIL.Image: (output `latex`) an in-memory representation of the image of the circuit diagram. * matplotlib.figure: (output `mpl`) a matplotlib figure object for the circuit diagram. * str: (output `latex_source`). The LaTeX source code. * TextDrawing: (output `text`). A drawing that can be printed as ascii art Raises: VisualizationError: when an invalid output method is selected
def create_can_publish_and_can_republish_permissions(sender, **kwargs): """ Add `can_publish` and `can_republish` permissions for each publishable model in the system. """ for model in sender.get_models(): if not issubclass(model, PublishingModel): continue content_type = ContentType.objects.get_for_model(model) permission, created = Permission.objects.get_or_create( content_type=content_type, codename='can_publish', defaults=dict(name='Can Publish %s' % model.__name__)) permission, created = Permission.objects.get_or_create( content_type=content_type, codename='can_republish', defaults=dict(name='Can Republish %s' % model.__name__))
Add `can_publish` and `can_republish` permissions for each publishable model in the system.
def get(self, request, bot_id, format=None): """ Get list of Telegram bots --- serializer: TelegramBotSerializer responseMessages: - code: 401 message: Not authenticated """ return super(TelegramBotList, self).get(request, bot_id, format)
Get list of Telegram bots --- serializer: TelegramBotSerializer responseMessages: - code: 401 message: Not authenticated
def emit_message(self, message): """ Send a message to the channel. We also emit the message back to the sender's WebSocket. """ try: nickname_color = self.nicknames[self.nickname] except KeyError: # Only accept messages if we've joined. return message = message[:settings.MAX_MESSAGE_LENGTH] # Handle IRC commands. if message.startswith("/"): self.connection.send_raw(message.lstrip("/")) return self.message_channel(message) self.namespace.emit("message", self.nickname, message, nickname_color)
Send a message to the channel. We also emit the message back to the sender's WebSocket.
def _handle_ansi_color_codes(self, s): """Replace ansi escape sequences with spans of appropriately named css classes.""" parts = HtmlReporter._ANSI_COLOR_CODE_RE.split(s) ret = [] span_depth = 0 # Note that len(parts) is always odd: text, code, text, code, ..., text. for i in range(0, len(parts), 2): ret.append(parts[i]) if i + 1 < len(parts): for code in parts[i + 1].split(';'): if code == '0': # Reset. (Codes are strings after the regex split.) while span_depth > 0: ret.append('</span>') span_depth -= 1 else: ret.append('<span class="ansi-{}">'.format(code)) span_depth += 1 while span_depth > 0: ret.append('</span>') span_depth -= 1 return ''.join(ret)
Replace ansi escape sequences with spans of appropriately named css classes.
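A standalone sketch of the same transformation, with the string-vs-int fix applied; the exact class regex is an assumption, but any pattern whose single capture group grabs the SGR parameter list behaves the same way.

import re

# Assumed shape of the class attribute: captures e.g. '31' or '1;32'.
_ANSI_COLOR_CODE_RE = re.compile(r'\x1b\[(\d+(?:;\d+)*)m')

def ansi_to_spans(s):
    parts = _ANSI_COLOR_CODE_RE.split(s)
    out, depth = [], 0
    for i in range(0, len(parts), 2):
        out.append(parts[i])
        if i + 1 < len(parts):
            for code in parts[i + 1].split(';'):
                if code == '0':  # Reset closes all open spans.
                    while depth:
                        out.append('</span>')
                        depth -= 1
                else:
                    out.append('<span class="ansi-{}">'.format(code))
                    depth += 1
    while depth:  # Close anything left dangling at end of input.
        out.append('</span>')
        depth -= 1
    return ''.join(out)

print(ansi_to_spans('\x1b[31mred\x1b[0m plain'))
# <span class="ansi-31">red</span> plain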
def sqrt_rc_imp(Ns,alpha,M=6): """ A truncated square root raised cosine pulse used in digital communications. The pulse shaping factor :math:`0 < \\alpha < 1` is required as well as the truncation factor M which sets the pulse duration to be :math:`2*M*T_{symbol}`. Parameters ---------- Ns : number of samples per symbol alpha : excess bandwidth factor on (0, 1), e.g., 0.35 M : equals RC one-sided symbol truncation factor Returns ------- b : ndarray containing the pulse shape Notes ----- The pulse shape b is typically used as the FIR filter coefficients when forming a pulse shaped digital communications waveform. When square root raised cosine (SRC) pulse is used to generate Tx signals and at the receiver used as a matched filter (receiver FIR filter), the received signal is now raised cosine shaped, thus having zero intersymbol interference and the optimum removal of additive white noise if present at the receiver input. Examples -------- Ten samples per symbol and :math:`\\alpha = 0.35`. >>> import matplotlib.pyplot as plt >>> from numpy import arange >>> from sk_dsp_comm.digitalcom import sqrt_rc_imp >>> b = sqrt_rc_imp(10,0.35) >>> n = arange(-10*6,10*6+1) >>> plt.stem(n,b) >>> plt.show() """ # Design the filter n = np.arange(-M*Ns,M*Ns+1) b = np.zeros(len(n)) Ns *= 1.0 a = alpha for i in range(len(n)): if abs(1 - 16*a**2*(n[i]/Ns)**2) <= np.finfo(np.float).eps/2: b[i] = 1/2.*((1+a)*np.sin((1+a)*np.pi/(4.*a))-(1-a)*np.cos((1-a)*np.pi/(4.*a))+(4*a)/np.pi*np.sin((1-a)*np.pi/(4.*a))) else: b[i] = 4*a/(np.pi*(1 - 16*a**2*(n[i]/Ns)**2)) b[i] = b[i]*(np.cos((1+a)*np.pi*n[i]/Ns) + np.sinc((1-a)*n[i]/Ns)*(1-a)*np.pi/(4.*a)) return b
A truncated square root raised cosine pulse used in digital communications. The pulse shaping factor :math:`0 < \\alpha < 1` is required as well as the truncation factor M which sets the pulse duration to be :math:`2*M*T_{symbol}`. Parameters ---------- Ns : number of samples per symbol alpha : excess bandwidth factor on (0, 1), e.g., 0.35 M : equals RC one-sided symbol truncation factor Returns ------- b : ndarray containing the pulse shape Notes ----- The pulse shape b is typically used as the FIR filter coefficients when forming a pulse shaped digital communications waveform. When square root raised cosine (SRC) pulse is used to generate Tx signals and at the receiver used as a matched filter (receiver FIR filter), the received signal is now raised cosine shaped, thus having zero intersymbol interference and the optimum removal of additive white noise if present at the receiver input. Examples -------- Ten samples per symbol and :math:`\\alpha = 0.35`. >>> import matplotlib.pyplot as plt >>> from numpy import arange >>> from sk_dsp_comm.digitalcom import sqrt_rc_imp >>> b = sqrt_rc_imp(10,0.35) >>> n = arange(-10*6,10*6+1) >>> plt.stem(n,b) >>> plt.show()
def validate_filters_or_records(filters_or_records): """Validation for filters_or_records variable from bulk_modify and bulk_delete""" # If filters_or_records is empty, fail if not filters_or_records: raise ValueError('Must provide at least one filter tuple or Record') # If the first item is neither a Record nor a tuple, fail if not isinstance(filters_or_records[0], (Record, tuple)): raise ValueError('Expected filter tuples or Records') # If the list mixes Records and tuples, fail _type = type(filters_or_records[0]) for item in filters_or_records: if not isinstance(item, _type): raise ValueError("Expected filter tuple or Record, received {0}".format(item)) return _type
Validation for filters_or_records variable from bulk_modify and bulk_delete
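A hypothetical call showing the homogeneity check; Record is whatever record class the client library defines, and the filter tuples here are made up for illustration.

filters = [('status', 'equals', 'open'), ('owner', 'equals', 'alice')]
print(validate_filters_or_records(filters))  # <class 'tuple'>

try:
    validate_filters_or_records([('status', 'equals', 'open'), 'not-a-filter'])
except ValueError as exc:
    print(exc)  # Expected filter tuple or Record, received not-a-filter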
async def connect(self): """ Get a connection for this instance """ if isinstance(self.connection, dict): # a dict like {'host': 'localhost', 'port': 6379, # 'db': 0, 'password': 'pass'} kwargs = self.connection.copy() address = ( kwargs.pop('host', 'localhost'), kwargs.pop('port', 6379) ) redis_kwargs = kwargs elif isinstance(self.connection, aioredis.Redis): self._pool = self.connection else: # a tuple or list ('localhost', 6379) # a string "redis://host:6379/0?encoding=utf-8" or # a unix domain socket path "/path/to/redis.sock" address = self.connection redis_kwargs = {} if self._pool is None: async with self._lock: if self._pool is None: self.log.debug('Connecting %s', repr(self)) self._pool = await self._create_redis_pool( address, **redis_kwargs, minsize=1, maxsize=100) return await self._pool
Get a connection for this instance
def _load32(ins): """ Load a 32 bit value from a memory address If the 2nd arg. starts with '*', it is always treated as an indirect value. """ output = _32bit_oper(ins.quad[2]) output.append('push de') output.append('push hl') return output
Load a 32 bit value from a memory address If the 2nd arg. starts with '*', it is always treated as an indirect value.
def strip_trailing_slashes(self, path): """Return input path minus any trailing slashes.""" m = re.match(r"(.*)/+$", path) if (m is None): return(path) return(m.group(1))
Return input path minus any trailing slashes.
def _split_iso9660_filename(fullname): # type: (bytes) -> Tuple[bytes, bytes, bytes] ''' A function to split an ISO 9660 filename into its constituent parts. This is the name, the extension, and the version number. Parameters: fullname - The name to split. Returns: A tuple containing the name, extension, and version. ''' namesplit = fullname.split(b';') version = b'' if len(namesplit) > 1: version = namesplit.pop() rest = b';'.join(namesplit) dotsplit = rest.split(b'.') if len(dotsplit) == 1: name = dotsplit[0] extension = b'' else: name = b'.'.join(dotsplit[:-1]) extension = dotsplit[-1] return (name, extension, version)
A function to split an ISO 9660 filename into its constituent parts. This is the name, the extension, and the version number. Parameters: fullname - The name to split. Returns: A tuple containing the name, extension, and version.
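A few sample splits (hypothetical names), covering the version suffix, a missing extension, and a name with multiple dots:

print(_split_iso9660_filename(b'README.TXT;1'))  # (b'README', b'TXT', b'1')
print(_split_iso9660_filename(b'KERNEL;1'))      # (b'KERNEL', b'', b'1')
print(_split_iso9660_filename(b'A.B.C'))         # (b'A.B', b'C', b'')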
def lrucache(func, size): """ A simple implementation of a least recently used (LRU) cache. Memoizes the recent calls of a computationally intensive function. Parameters ---------- func : function Must be unary (takes a single argument) size : int The size of the cache (number of previous calls to store) """ if size == 0: return func elif size < 0: raise ValueError("size argument must be a positive integer") # this only works for unary functions if not is_arity(1, func): raise ValueError("The function must be unary (take a single argument)") # initialize the cache cache = OrderedDict() def wrapper(x): if not(type(x) is np.ndarray): raise ValueError("Input must be an ndarray") # hash the input, using tostring for small and repr for large arrays if x.size <= 1e4: key = hash(x.tostring()) else: key = hash(repr(x)) # if the key is not in the cache, evalute the function if key not in cache: # clear space if necessary (keeps the most recent keys) if len(cache) >= size: cache.popitem(last=False) # store the new value in the cache cache[key] = func(x) return cache[key] return wrapper
A simple implementation of a least recently used (LRU) cache. Memoizes the recent calls of a computationally intensive function. Parameters ---------- func : function Must be unary (takes a single argument) size : int The size of the cache (number of previous calls to store)
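A minimal usage sketch, assuming is_arity and OrderedDict are available in the host module as the implementation requires; the second call with the same array is served from the cache.

import numpy as np

def slow_norm(x):
    return np.linalg.norm(x)  # stand-in for an expensive computation

fast_norm = lrucache(slow_norm, size=2)
v = np.arange(5.0)
print(fast_norm(v))  # computed: 5.477...
print(fast_norm(v))  # same key -> returned from the cache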
def console_output(msg, logging_msg=None): """Use instead of print, to clear the status information before printing""" assert isinstance(msg, bytes) assert isinstance(logging_msg, bytes) or logging_msg is None from polysh import remote_dispatcher remote_dispatcher.log(logging_msg or msg) if remote_dispatcher.options.interactive: from polysh.stdin import the_stdin_thread the_stdin_thread.no_raw_input() global last_status_length if last_status_length: safe_write('\r{}\r'.format( last_status_length * ' ').encode()) last_status_length = 0 safe_write(msg)
Use instead of print, to clear the status information before printing
def read_file(self, location): """Read in a yaml file and return as a python object""" try: return yaml.load(open(location)) except (yaml.parser.ParserError, yaml.scanner.ScannerError) as error: raise self.BadFileErrorKls("Failed to read yaml", location=location, error_type=error.__class__.__name__, error="{0}{1}".format(error.problem, error.problem_mark))
Read in a yaml file and return as a python object
def timeseries(self): """ Feed-in time series of generator It returns the actual time series used in power flow analysis. If :attr:`_timeseries` is not :obj:`None`, it is returned. Otherwise, :meth:`timeseries` looks for generation and curtailment time series of the according type of technology (and weather cell) in :class:`~.grid.network.TimeSeries`. Returns ------- :pandas:`pandas.DataFrame<dataframe>` DataFrame containing active power in kW in column 'p' and reactive power in kVA in column 'q'. """ if self._timeseries is None: # get time series for active power depending on if they are # differentiated by weather cell ID or not if isinstance(self.grid.network.timeseries.generation_fluctuating. columns, pd.MultiIndex): if self.weather_cell_id: try: timeseries = self.grid.network.timeseries.\ generation_fluctuating[ self.type, self.weather_cell_id].to_frame('p') except KeyError: logger.exception("No time series for type {} and " "weather cell ID {} given.".format( self.type, self.weather_cell_id)) raise else: logger.exception("No weather cell ID provided for " "fluctuating generator {}.".format( repr(self))) raise KeyError else: try: timeseries = self.grid.network.timeseries.\ generation_fluctuating[self.type].to_frame('p') except KeyError: logger.exception("No time series for type {} " "given.".format(self.type)) raise timeseries = timeseries * self.nominal_capacity # subtract curtailment if self.curtailment is not None: timeseries = timeseries.join( self.curtailment.to_frame('curtailment'), how='left') timeseries.p = timeseries.p - timeseries.curtailment.fillna(0) if self.timeseries_reactive is not None: timeseries['q'] = self.timeseries_reactive else: timeseries['q'] = timeseries['p'] * self.q_sign * tan(acos( self.power_factor)) return timeseries else: return self._timeseries.loc[ self.grid.network.timeseries.timeindex, :]
Feed-in time series of generator It returns the actual time series used in power flow analysis. If :attr:`_timeseries` is not :obj:`None`, it is returned. Otherwise, :meth:`timeseries` looks for generation and curtailment time series of the according type of technology (and weather cell) in :class:`~.grid.network.TimeSeries`. Returns ------- :pandas:`pandas.DataFrame<dataframe>` DataFrame containing active power in kW in column 'p' and reactive power in kVA in column 'q'.
def _map_query_path_to_location_info(query_metadata_table): """Create a map from each query path to a LocationInfo at that path. Args: query_metadata_table: QueryMetadataTable, object containing all metadata collected during query processing, including location metadata (e.g. which locations are folded or optional). Returns: Dict[Tuple[str], LocationInfo], dictionary mapping query path to LocationInfo at that path. """ query_path_to_location_info = {} for location, location_info in query_metadata_table.registered_locations: if not isinstance(location, Location): continue if location.query_path in query_path_to_location_info: # make sure the stored location information equals the new location information # for the fields the SQL backend requires. equivalent_location_info = query_path_to_location_info[location.query_path] if not _location_infos_equal(location_info, equivalent_location_info): raise AssertionError( u'Differing LocationInfos at query_path {} between {} and {}. Expected ' u'parent_location.query_path, optional_scopes_depth, recursive_scopes_depth ' u'and types to be equal for LocationInfos sharing the same query path.'.format( location.query_path, location_info, equivalent_location_info)) query_path_to_location_info[location.query_path] = location_info return query_path_to_location_info
Create a map from each query path to a LocationInfo at that path. Args: query_metadata_table: QueryMetadataTable, object containing all metadata collected during query processing, including location metadata (e.g. which locations are folded or optional). Returns: Dict[Tuple[str], LocationInfo], dictionary mapping query path to LocationInfo at that path.
def check_vip_ip(self, ip, environment_vip): """ Check available ipv6 in environment vip """ uri = 'api/ipv6/ip/%s/environment-vip/%s/' % (ip, environment_vip) return super(ApiNetworkIPv6, self).get(uri)
Check available ipv6 in environment vip
def warning(self, msg, indent=0, **kwargs): """invoke ``self.logger.warning``""" return self.logger.warning(self._indent(msg, indent), **kwargs)
invoke ``self.logger.warning``
def triangle_normal(tri): """ Computes the (approximate) normal vector of the input triangle. :param tri: triangle object :type tri: elements.Triangle :return: normal vector of the triangle :rtype: tuple """ vec1 = vector_generate(tri.vertices[0].data, tri.vertices[1].data) vec2 = vector_generate(tri.vertices[1].data, tri.vertices[2].data) return vector_cross(vec1, vec2)
Computes the (approximate) normal vector of the input triangle. :param tri: triangle object :type tri: elements.Triangle :return: normal vector of the triangle :rtype: tuple
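An equivalent computation with plain tuples; vector_generate and vector_cross are assumed to be the usual point-subtraction and cross-product helpers, so a counter-clockwise triangle in the xy-plane yields a +z normal.

def cross(u, v):
    return (u[1] * v[2] - u[2] * v[1],
            u[2] * v[0] - u[0] * v[2],
            u[0] * v[1] - u[1] * v[0])

a, b, c = (0, 0, 0), (1, 0, 0), (0, 1, 0)
vec1 = tuple(q - p for p, q in zip(a, b))  # b - a
vec2 = tuple(q - p for p, q in zip(b, c))  # c - b
print(cross(vec1, vec2))  # (0, 0, 1)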
def active(self): """Returns all outlets that are currently active and have sales.""" qs = self.get_queryset() return qs.filter( models.Q( models.Q(start_date__isnull=True) | models.Q(start_date__lte=now().date()) ) & models.Q( models.Q(end_date__isnull=True) | models.Q(end_date__gte=now().date()) ) ).distinct()
Returns all outlets that are currently active and have sales.
def _replace_type_to_regex(cls, match): """ /<int:id> -> r'(?P<id>\d+)' """ groupdict = match.groupdict() _type = groupdict.get('type') type_regex = cls.TYPE_REGEX_MAP.get(_type, '[^/]+') name = groupdict.get('name') return r'(?P<{name}>{type_regex})'.format( name=name, type_regex=type_regex )
/<int:id> -> r'(?P<id>\d+)'
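A standalone sketch of the conversion; the URL-parameter pattern and TYPE_REGEX_MAP below are assumptions mirroring what the class presumably defines.

import re

PARAM_RE = re.compile(r'<(?:(?P<type>\w+):)?(?P<name>\w+)>')
TYPE_REGEX_MAP = {'int': r'\d+', 'float': r'\d+(?:\.\d+)?'}

def replace(match):
    # Untyped parameters fall back to "anything but a slash".
    type_regex = TYPE_REGEX_MAP.get(match.group('type'), '[^/]+')
    return r'(?P<{}>{})'.format(match.group('name'), type_regex)

print(PARAM_RE.sub(replace, '/users/<int:id>/posts/<slug>'))
# /users/(?P<id>\d+)/posts/(?P<slug>[^/]+)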
def getSpec(cls): """ Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getSpec`. """ ns = dict( description=KNNClassifierRegion.__doc__, singleNodeOnly=True, inputs=dict( categoryIn=dict( description='Vector of zero or more category indices for this input' 'sample. -1 implies no category.', dataType='Real32', count=0, required=True, regionLevel=True, isDefaultInput=False, requireSplitterMap=False), bottomUpIn=dict( description='Belief values over children\'s groups', dataType='Real32', count=0, required=True, regionLevel=False, isDefaultInput=True, requireSplitterMap=False), partitionIn=dict( description='Partition ID of the input sample', dataType='Real32', count=0, required=True, regionLevel=True, isDefaultInput=False, requireSplitterMap=False), auxDataIn=dict( description='Auxiliary data from the sensor', dataType='Real32', count=0, required=False, regionLevel=True, isDefaultInput=False, requireSplitterMap=False) ), outputs=dict( categoriesOut=dict( description='A vector representing, for each category ' 'index, the likelihood that the input to the node belongs ' 'to that category based on the number of neighbors of ' 'that category that are among the nearest K.', dataType='Real32', count=0, regionLevel=True, isDefaultOutput=True), bestPrototypeIndices=dict( description='A vector that lists, in descending order of ' 'the match, the positions of the prototypes ' 'that best match the input pattern.', dataType='Real32', count=0, regionLevel=True, isDefaultOutput=False), categoryProbabilitiesOut=dict( description='A vector representing, for each category ' 'index, the probability that the input to the node belongs ' 'to that category based on the distance to the nearest ' 'neighbor of each category.', dataType='Real32', count=0, regionLevel=True, isDefaultOutput=True), ), parameters=dict( learningMode=dict( description='Boolean (0/1) indicating whether or not a region ' 'is in learning mode.', dataType='UInt32', count=1, constraints='bool', defaultValue=1, accessMode='ReadWrite'), inferenceMode=dict( description='Boolean (0/1) indicating whether or not a region ' 'is in inference mode.', dataType='UInt32', count=1, constraints='bool', defaultValue=0, accessMode='ReadWrite'), acceptanceProbability=dict( description='During learning, inputs are learned with ' 'probability equal to this parameter. ' 'If set to 1.0, the default, ' 'all inputs will be considered ' '(subject to other tests).', dataType='Real32', count=1, constraints='', defaultValue=1.0, #accessMode='Create'), accessMode='ReadWrite'), # and Create too confusion=dict( description='Confusion matrix accumulated during inference. ' 'Reset with reset(). 
This is available to Python ' 'client code only.', dataType='Handle', count=2, constraints='', defaultValue=None, accessMode='Read'), activeOutputCount=dict( description='The number of active elements in the ' '"categoriesOut" output.', dataType='UInt32', count=1, constraints='', defaultValue=0, accessMode='Read'), categoryCount=dict( description='An integer indicating the number of ' 'categories that have been learned', dataType='UInt32', count=1, constraints='', defaultValue=None, accessMode='Read'), patternCount=dict( description='Number of patterns learned by the classifier.', dataType='UInt32', count=1, constraints='', defaultValue=None, accessMode='Read'), patternMatrix=dict( description='The actual patterns learned by the classifier, ' 'returned as a matrix.', dataType='Handle', count=1, constraints='', defaultValue=None, accessMode='Read'), k=dict( description='The number of nearest neighbors to use ' 'during inference.', dataType='UInt32', count=1, constraints='', defaultValue=1, accessMode='Create'), maxCategoryCount=dict( description='The maximal number of categories the ' 'classifier will distinguish between.', dataType='UInt32', count=1, constraints='', defaultValue=2, accessMode='Create'), distanceNorm=dict( description='The norm to use for a distance metric (i.e., ' 'the "p" in Lp-norm)', dataType='Real32', count=1, constraints='', defaultValue=2.0, accessMode='ReadWrite'), #accessMode='Create'), distanceMethod=dict( description='Method used to compute distances between inputs and' 'prototypes. Possible options are norm, rawOverlap, ' 'pctOverlapOfLarger, and pctOverlapOfProto', dataType="Byte", count=0, constraints='enum: norm, rawOverlap, pctOverlapOfLarger, ' 'pctOverlapOfProto, pctOverlapOfInput', defaultValue='norm', accessMode='ReadWrite'), outputProbabilitiesByDist=dict( description='If True, categoryProbabilitiesOut is the probability of ' 'each category based on the distance to the nearest neighbor of ' 'each category. If False, categoryProbabilitiesOut is the ' 'percentage of neighbors among the top K that are of each category.', dataType='UInt32', count=1, constraints='bool', defaultValue=0, accessMode='Create'), distThreshold=dict( description='Distance Threshold. If a pattern that ' 'is less than distThreshold apart from ' 'the input pattern already exists in the ' 'KNN memory, then the input pattern is ' 'not added to KNN memory.', dataType='Real32', count=1, constraints='', defaultValue=0.0, accessMode='ReadWrite'), inputThresh=dict( description='Input binarization threshold, used if ' '"doBinarization" is True.', dataType='Real32', count=1, constraints='', defaultValue=0.5, accessMode='Create'), doBinarization=dict( description='Whether or not to binarize the input vectors.', dataType='UInt32', count=1, constraints='bool', defaultValue=0, accessMode='Create'), useSparseMemory=dict( description='A boolean flag that determines whether or ' 'not the KNNClassifier will use sparse Memory', dataType='UInt32', count=1, constraints='', defaultValue=1, accessMode='Create'), minSparsity=dict( description="If useSparseMemory is set, only vectors with sparsity" " >= minSparsity will be stored during learning. A value" " of 0.0 implies all vectors will be stored. 
A value of" " 0.1 implies only vectors with at least 10% sparsity" " will be stored", dataType='Real32', count=1, constraints='', defaultValue=0.0, accessMode='ReadWrite'), sparseThreshold=dict( description='If sparse memory is used, input variables ' 'whose absolute value is less than this ' 'threshold will be stored as zero', dataType='Real32', count=1, constraints='', defaultValue=0.0, accessMode='Create'), relativeThreshold=dict( description='Whether to multiply sparseThreshold by max value ' ' in input', dataType='UInt32', count=1, constraints='bool', defaultValue=0, accessMode='Create'), winnerCount=dict( description='Only this many elements of the input are ' 'stored. All elements are stored if 0.', dataType='UInt32', count=1, constraints='', defaultValue=0, accessMode='Create'), doSphering=dict( description='A boolean indicating whether or not data should' 'be "sphered" (i.e. each dimension should be normalized such' 'that its mean and variance are zero and one, respectively.) This' ' sphering normalization would be performed after all training ' 'samples had been received but before inference was performed. ' 'The dimension-specific normalization constants would then ' ' be applied to all future incoming vectors prior to performing ' ' conventional NN inference.', dataType='UInt32', count=1, constraints='bool', defaultValue=0, accessMode='Create'), SVDSampleCount=dict( description='If not 0, carries out SVD transformation after ' 'that many samples have been seen.', dataType='UInt32', count=1, constraints='', defaultValue=0, accessMode='Create'), SVDDimCount=dict( description='Number of dimensions to keep after SVD if greater ' 'than 0. If set to -1 it is considered unspecified. ' 'If set to 0 it is consider "adaptive" and the number ' 'is chosen automatically.', dataType='Int32', count=1, constraints='', defaultValue=-1, accessMode='Create'), fractionOfMax=dict( description='The smallest singular value which is retained ' 'as a fraction of the largest singular value. This is ' 'used only if SVDDimCount==0 ("adaptive").', dataType='UInt32', count=1, constraints='', defaultValue=0, accessMode='Create'), useAuxiliary=dict( description='Whether or not the classifier should use auxiliary ' 'input data.', dataType='UInt32', count=1, constraints='bool', defaultValue=0, accessMode='Create'), justUseAuxiliary=dict( description='Whether or not the classifier should ONLUY use the ' 'auxiliary input data.', dataType='UInt32', count=1, constraints='bool', defaultValue=0, accessMode='Create'), verbosity=dict( description='An integer that controls the verbosity level, ' '0 means no verbose output, increasing integers ' 'provide more verbosity.', dataType='UInt32', count=1, constraints='', defaultValue=0 , accessMode='ReadWrite'), keepAllDistances=dict( description='Whether to store all the protoScores in an array, ' 'rather than just the ones for the last inference. ' 'When this parameter is changed from True to False, ' 'all the scores are discarded except for the most ' 'recent one.', dataType='UInt32', count=1, constraints='bool', defaultValue=None, accessMode='ReadWrite'), replaceDuplicates=dict( description='A boolean flag that determines whether or' 'not the KNNClassifier should replace duplicates' 'during learning. 
This should be on when online ' 'learning.', dataType='UInt32', count=1, constraints='bool', defaultValue=None, accessMode='ReadWrite'), cellsPerCol=dict( description='If >= 1, we assume the input is organized into columns, ' 'in the same manner as the temporal memory AND ' 'whenever we store a new prototype, we only store the ' 'start cell (first cell) in any column which is bursting.', dataType='UInt32', count=1, constraints='', defaultValue=0, accessMode='Create'), maxStoredPatterns=dict( description='Limits the maximum number of the training patterns ' 'stored. When KNN learns in a fixed capacity mode, ' 'the unused patterns are deleted once the number ' 'of stored patterns is greater than maxStoredPatterns. ' '[-1 is no limit] ', dataType='Int32', count=1, constraints='', defaultValue=-1, accessMode='Create'), ), commands=dict() ) return ns
Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getSpec`.
def generate_query_string(self, otp, nonce, timestamp=False, sl=None, timeout=None): """ Returns a query string which is sent to the validation servers. """ data = [('id', self.client_id), ('otp', otp), ('nonce', nonce)] if timestamp: data.append(('timestamp', '1')) if sl is not None: if sl not in range(0, 101) and sl not in ['fast', 'secure']: raise Exception('sl parameter value must be between 0 and ' '100 or string "fast" or "secure"') data.append(('sl', sl)) if timeout: data.append(('timeout', timeout)) query_string = urlencode(data) if self.key: hmac_signature = self.generate_message_signature(query_string) query_string += '&h=%s' % (hmac_signature.replace('+', '%2B')) return query_string
Returns a query string which is sent to the validation servers.
def controller(self): """Show current linked controllers.""" if hasattr(self, 'controllers'): if len(self.controllers) > 1: # in the future, we should support more controllers raise TypeError("Only one controller per account.") return self.controllers[0] raise AttributeError("There is no controller assigned.")
Show current linked controllers.
def get_all_webhooks(self, **kwargs): # noqa: E501 """Get all webhooks for a customer # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_all_webhooks(async_req=True) >>> result = thread.get() :param async_req bool :param int offset: :param int limit: :return: ResponseContainerPagedNotificant If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_all_webhooks_with_http_info(**kwargs) # noqa: E501 else: (data) = self.get_all_webhooks_with_http_info(**kwargs) # noqa: E501 return data
Get all webhooks for a customer # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_all_webhooks(async_req=True) >>> result = thread.get() :param async_req bool :param int offset: :param int limit: :return: ResponseContainerPagedNotificant If the method is called asynchronously, returns the request thread.
async def write(self, data): """Writes a chunk of data to the streaming response. :param data: bytes-ish data to be written. """ if type(data) != bytes: data = self._encode_body(data) self.protocol.push_data(b"%x\r\n%b\r\n" % (len(data), data)) await self.protocol.drain()
Writes a chunk of data to the streaming response. :param data: bytes-ish data to be written.
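The framing itself is plain HTTP/1.1 chunked transfer encoding: hex length, CRLF, payload, CRLF. A quick demonstration of the same bytes formatting:

data = b'hello'
chunk = b"%x\r\n%b\r\n" % (len(data), data)
print(chunk)  # b'5\r\nhello\r\n'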
def _item_check(self, dim_vals, data): """ Applies optional checks to individual data elements before they are inserted ensuring that they are of a certain type. Subclassed may implement further element restrictions. """ if not self._check_items: return elif self.data_type is not None and not isinstance(data, self.data_type): if isinstance(self.data_type, tuple): data_type = tuple(dt.__name__ for dt in self.data_type) else: data_type = self.data_type.__name__ raise TypeError('{slf} does not accept {data} type, data elements have ' 'to be a {restr}.'.format(slf=type(self).__name__, data=type(data).__name__, restr=data_type)) elif not len(dim_vals) == self.ndims: raise KeyError('The data contains keys of length %d, but the kdims ' 'only declare %d dimensions. Ensure that the number ' 'of kdims match the length of the keys in your data.' % (len(dim_vals), self.ndims))
Applies optional checks to individual data elements before they are inserted ensuring that they are of a certain type. Subclassed may implement further element restrictions.
def get_mapped_filenames(self, memoryMap = None): """ Retrieves the filenames for memory mapped files in the debugee. @type memoryMap: list( L{win32.MemoryBasicInformation} ) @param memoryMap: (Optional) Memory map returned by L{get_memory_map}. If not given, the current memory map is used. @rtype: dict( int S{->} str ) @return: Dictionary mapping memory addresses to file names. Native filenames are converted to Win32 filenames when possible. """ hProcess = self.get_handle( win32.PROCESS_VM_READ | win32.PROCESS_QUERY_INFORMATION ) if not memoryMap: memoryMap = self.get_memory_map() mappedFilenames = dict() for mbi in memoryMap: if mbi.Type not in (win32.MEM_IMAGE, win32.MEM_MAPPED): continue baseAddress = mbi.BaseAddress fileName = "" try: fileName = win32.GetMappedFileName(hProcess, baseAddress) fileName = PathOperations.native_to_win32_pathname(fileName) except WindowsError: #e = sys.exc_info()[1] #try: # msg = "Can't get mapped file name at address %s in process " \ # "%d, reason: %s" % (HexDump.address(baseAddress), # self.get_pid(), # e.strerror) # warnings.warn(msg, Warning) #except Exception: pass mappedFilenames[baseAddress] = fileName return mappedFilenames
Retrieves the filenames for memory mapped files in the debugee. @type memoryMap: list( L{win32.MemoryBasicInformation} ) @param memoryMap: (Optional) Memory map returned by L{get_memory_map}. If not given, the current memory map is used. @rtype: dict( int S{->} str ) @return: Dictionary mapping memory addresses to file names. Native filenames are converted to Win32 filenames when possible.
def implementation(self, commands_module: arg(short_option='-m') = DEFAULT_COMMANDS_MODULE, config_file: arg(short_option='-f') = None, # Globals globals_: arg( container=dict, type=json_value, help='Global variables & default args for *all* commands; will be ' 'injected into itself, default args, and environment variables ' '(higher precedence than keyword args)' ) = None, # Special globals (for command line convenience) env: arg(help='env will be added to globals if specified') = None, version: arg(help='version will be added to globals if specified') = None, echo: arg( type=bool, help='echo=True will be added to globals', inverse_help='echo=False will be added to globals' ) = None, # Environment variables environ: arg( container=dict, help='Additional environment variables; ' 'added just before commands are run' ) = None, # Meta info: arg(help='Show info and exit') = False, list_commands: arg(help='Show info & commands and exit') = False, debug: arg( type=bool, help='Print debugging info & re-raise exceptions; also added to globals' ) = None, *, all_argv=(), run_argv=(), command_argv=(), cli_args=()): """Run one or more commands in succession. For example, assume the commands ``local`` and ``remote`` have been defined; the following will run ``ls`` first on the local host and then on the remote host:: runcommands local ls remote <host> ls When a command name is encountered in ``argv``, it will be considered the starting point of the next command *unless* the previous item in ``argv`` was an option like ``--xyz`` that expects a value (i.e., it's not a flag). To avoid ambiguity when an option value matches a command name, the value can be prepended with a colon to force it to be considered a value and not a command name. """ collection = Collection.load_from_module(commands_module) config_file = self.find_config_file(config_file) cli_globals = globals_ or {} if env: cli_globals['env'] = env if version: cli_globals['version'] = version if echo is not None: cli_globals['echo'] = echo if debug is not None: cli_globals['debug'] = debug if config_file: args_from_file = self.read_config_file(config_file, collection) args = merge_dicts(args_from_file, {'environ': environ or {}}) config_file_globals = args['globals'] env = cli_globals.get('env') or config_file_globals.get('env') if env: envs = args['envs'] try: env_globals = envs[env] except KeyError: raise RunnerError('Unknown env: {env}'.format_map(locals())) globals_ = merge_dicts(config_file_globals, env_globals, cli_globals) globals_['envs'] = envs else: globals_ = merge_dicts(config_file_globals, cli_globals) default_args = {name: {} for name in collection} default_args = merge_dicts(default_args, args.get('args') or {}) for command_name, command_default_args in default_args.items(): command = collection[command_name] # Normalize arg names from default args section. for name in tuple(command_default_args): param = command.find_parameter(name) if param is None: raise RunnerError( 'Unknown arg for command {command_name} in default args section of ' '{config_file}: {name}' .format_map(locals())) if param is not None and name != param.name: command_default_args[param.name] = command_default_args.pop(name) # Add globals that correspond to this command (that # aren't present in default args section). 
for name, value in globals_.items(): param = command.find_parameter(name) if param is not None: if param.name not in command_default_args: command_default_args[param.name] = value elif command.has_kwargs: name = name.replace('-', '_') command_default_args[name] = value # Convert lists to tuples for the command's args that are # specified as being tuples. for name, value in command_default_args.items(): command_arg = command.find_arg(name) if command_arg.container and isinstance(value, list): command_default_args[name] = command_arg.container(value) default_args = {name: args for name, args in default_args.items() if args} environ = args['environ'] else: globals_ = cli_globals default_args = {} environ = environ or {} debug = globals_.get('debug', False) show_info = info or list_commands or not command_argv or debug print_and_exit = info or list_commands globals_, default_args, environ = self.interpolate(globals_, default_args, environ) if show_info: print('RunCommands', __version__) if debug: print() printer.debug('Commands module:', commands_module) printer.debug('Config file:', config_file) printer.debug('All args:', all_argv) printer.debug('Run args:', run_argv) printer.debug('Command args:', command_argv) items = ( ('Globals:', globals_), ('Default args:', default_args), ('Environment variables:', environ), ) for label, data in items: if data: printer.debug(label) for k in sorted(data): v = data[k] printer.debug(' - {k} = {v!r}'.format_map(locals())) if environ: os.environ.update(environ) collection.set_attrs(debug=debug) collection.set_default_args(default_args) runner = CommandRunner(collection, debug) if print_and_exit: if list_commands: runner.print_usage() elif not command_argv: printer.warning('\nNo command(s) specified') runner.print_usage() else: runner.run(command_argv)
Run one or more commands in succession. For example, assume the commands ``local`` and ``remote`` have been defined; the following will run ``ls`` first on the local host and then on the remote host:: runcommands local ls remote <host> ls When a command name is encountered in ``argv``, it will be considered the starting point of the next command *unless* the previous item in ``argv`` was an option like ``--xyz`` that expects a value (i.e., it's not a flag). To avoid ambiguity when an option value matches a command name, the value can be prepended with a colon to force it to be considered a value and not a command name.
def liste_campagnes(self, campagne=None): """ List measurement campaigns and their associated stations Parameters: campagne: if given, only list the stations for that campaign """ condition = "" if campagne: condition = "WHERE NOM_COURT_CM='%s'" % campagne _sql = """SELECT NOM_COURT_CM AS CAMPAGNE, IDENTIFIANT AS STATION, LIBELLE AS LIBELLE_CM, DATEDEB AS DEBUT, DATEFIN AS FIN FROM CAMPMES INNER JOIN CAMPMES_STATION USING (NOM_COURT_CM) INNER JOIN STATION USING (NOM_COURT_SIT) %s ORDER BY DATEDEB DESC""" % condition return psql.read_sql(_sql, self.conn)
List measurement campaigns and their associated stations Parameters: campagne: if given, only list the stations for that campaign
def ecef2ned(x: float, y: float, z: float, lat0: float, lon0: float, h0: float, ell: Ellipsoid = None, deg: bool = True) -> Tuple[float, float, float]: """ Convert ECEF x,y,z to North, East, Down Parameters ---------- x : float or numpy.ndarray of float ECEF x coordinate (meters) y : float or numpy.ndarray of float ECEF y coordinate (meters) z : float or numpy.ndarray of float ECEF z coordinate (meters) lat0 : float Observer geodetic latitude lon0 : float Observer geodetic longitude h0 : float observer altitude above geodetic ellipsoid (meters) ell : Ellipsoid, optional reference ellipsoid deg : bool, optional degrees input/output (False: radians in/out) Results ------- n : float or numpy.ndarray of float North NED coordinate (meters) e : float or numpy.ndarray of float East NED coordinate (meters) d : float or numpy.ndarray of float Down NED coordinate (meters) """ e, n, u = ecef2enu(x, y, z, lat0, lon0, h0, ell, deg=deg) return n, e, -u
Convert ECEF x,y,z to North, East, Down Parameters ---------- x : float or numpy.ndarray of float ECEF x coordinate (meters) y : float or numpy.ndarray of float ECEF y coordinate (meters) z : float or numpy.ndarray of float ECEF z coordinate (meters) lat0 : float Observer geodetic latitude lon0 : float Observer geodetic longitude h0 : float observer altitude above geodetic ellipsoid (meters) ell : Ellipsoid, optional reference ellipsoid deg : bool, optional degrees input/output (False: radians in/out) Results ------- n : float or numpy.ndarray of float North NED coordinate (meters) e : float or numpy.ndarray of float East NED coordinate (meters) d : float or numpy.ndarray of float Down NED coordinate (meters)
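Since NED is just ENU with the horizontal axes swapped and "up" negated, a quick consistency check against ecef2enu from the same module (hypothetical coordinates):

x, y, z = 660930.2, -4701424.0, 4246579.6  # hypothetical ECEF point (meters)
lat0, lon0, h0 = 42.0, -82.0, 200.0

e, n, u = ecef2enu(x, y, z, lat0, lon0, h0)
assert ecef2ned(x, y, z, lat0, lon0, h0) == (n, e, -u)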
def write_json(path, params): """Write local or remote.""" logger.debug("write %s to %s", params, path) if path.startswith("s3://"): bucket = get_boto3_bucket(path.split("/")[2]) key = "/".join(path.split("/")[3:]) logger.debug("upload %s", key) bucket.put_object( Key=key, Body=json.dumps(params, sort_keys=True, indent=4) ) else: makedirs(os.path.dirname(path)) with open(path, 'w') as dst: json.dump(params, dst, sort_keys=True, indent=4)
Write local or remote.
def username(self, value=None): """ Return or set the username :param string value: the new username to use :returns: string or new :class:`URL` instance """ if value is not None: return URL._mutate(self, username=value) return unicode_unquote(self._tuple.username)
Return or set the username :param string value: the new username to use :returns: string or new :class:`URL` instance
def is_playing_line_in(self): """bool: Is the speaker playing line-in?""" response = self.avTransport.GetPositionInfo([ ('InstanceID', 0), ('Channel', 'Master') ]) track_uri = response['TrackURI'] return re.match(r'^x-rincon-stream:', track_uri) is not None
bool: Is the speaker playing line-in?
def UpdateCronJob(self, cronjob_id, last_run_status=unchanged, last_run_time=unchanged, current_run_id=unchanged, state=unchanged, forced_run_requested=unchanged): """Updates run information for an existing cron job. Args: cronjob_id: The id of the cron job to update. last_run_status: A CronJobRunStatus object. last_run_time: The last time a run was started for this cron job. current_run_id: The id of the currently active run. state: The state dict for stateful cron jobs. forced_run_requested: A boolean indicating if a forced run is pending for this job. Raises: UnknownCronJobError: A cron job with the given id does not exist. """
Updates run information for an existing cron job. Args: cronjob_id: The id of the cron job to update. last_run_status: A CronJobRunStatus object. last_run_time: The last time a run was started for this cron job. current_run_id: The id of the currently active run. state: The state dict for stateful cron jobs. forced_run_requested: A boolean indicating if a forced run is pending for this job. Raises: UnknownCronJobError: A cron job with the given id does not exist.
def get_candidate_election(self, election): """Get a CandidateElection.""" return CandidateElection.objects.get(candidate=self, election=election)
Get a CandidateElection.
def enforce_dependencies(cls, functions, kind): ''' This is a class global method to enforce the dependencies that you currently know about. It will modify the "functions" dict and remove/replace modules that are missing dependencies. ''' for dependency, dependent_dict in six.iteritems(cls.dependency_dict[kind]): for (mod_name, func_name), (frame, params) in six.iteritems(dependent_dict): if 'retcode' in params or 'nonzero_retcode' in params: try: retcode = cls.run_command(dependency, mod_name, func_name) except OSError as exc: if exc.errno == errno.ENOENT: log.trace( 'Failed to run command %s, %s not found', dependency, exc.filename ) else: log.trace( 'Failed to run command \'%s\': %s', dependency, exc ) retcode = -1 if 'retcode' in params: if params['retcode'] == retcode: continue elif 'nonzero_retcode' in params: if params['nonzero_retcode']: if retcode != 0: continue else: if retcode == 0: continue # check if dependency is loaded elif dependency is True: log.trace( 'Dependency for %s.%s exists, not unloading', mod_name, func_name ) continue # check if you have the dependency elif dependency in frame.f_globals \ or dependency in frame.f_locals: log.trace( 'Dependency (%s) already loaded inside %s, skipping', dependency, mod_name ) continue log.trace( 'Unloading %s.%s because dependency (%s) is not met', mod_name, func_name, dependency ) # if not, unload the function if frame: try: func_name = frame.f_globals['__func_alias__'][func_name] except (AttributeError, KeyError): pass mod_key = '{0}.{1}'.format(mod_name, func_name) # if we don't have this module loaded, skip it! if mod_key not in functions: continue try: fallback_function = params.get('fallback_function') if fallback_function is not None: functions[mod_key] = fallback_function else: del functions[mod_key] except AttributeError: # we already did??? log.trace('%s already removed, skipping', mod_key) continue
This is a class global method to enforce the dependencies that you currently know about. It will modify the "functions" dict and remove/replace modules that are missing dependencies.