code: string (length 4 to 4.48k)
docstring: string (length 1 to 6.45k)
_id: string (length 24)
def _match(self, check): <NEW_LINE> <INDENT> matches = [] <NEW_LINE> tests = {} <NEW_LINE> for k, v in check.items(): <NEW_LINE> <INDENT> if isinstance(v, dict): <NEW_LINE> <INDENT> tests[k] = CompositeFilter(v) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> tests[k] = lambda o, v=v: _add_tz(o) == _add_tz(v) <NEW_LINE> <DEDENT> <DEDENT> for rec in self._records.values(): <NEW_LINE> <INDENT> if self._match_one(rec, tests): <NEW_LINE> <INDENT> matches.append(deepcopy(rec)) <NEW_LINE> <DEDENT> <DEDENT> return matches
Find all the matches for a check dict.
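A quick aside on the v=v default in the lambda above: binding the loop variable at definition time is what keeps each test comparing against its own value. A minimal sketch of the pitfall it avoids (values here are hypothetical):

checks = {'a': 1, 'b': 2}
tests = {}
for k, v in checks.items():
    # without the v=v default, every lambda would close over the final v
    tests[k] = lambda o, v=v: o == v
assert tests['a'](1) and tests['b'](2)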
625941b6627d3e7fe0d68c59
def getDrives(self): <NEW_LINE> <INDENT> cache, key, data = self.getCacheKey('drive-list') <NEW_LINE> if not data: <NEW_LINE> <INDENT> sharepoint = self.getSharepoint() <NEW_LINE> list_o_drives = [('', 'None'),] <NEW_LINE> if sharepoint: <NEW_LINE> <INDENT> site = self.getSharepointSite() <NEW_LINE> if site: <NEW_LINE> <INDENT> for d in site.getDrives(): <NEW_LINE> <INDENT> list_o_drives.append((d.id, d.name + ' ' + d.webUrl )) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> data = atapi.DisplayList(list_o_drives) <NEW_LINE> cache[key] = data <NEW_LINE> <DEDENT> return data
return List of Drives
625941b6377c676e91271fb5
def get_password(text): <NEW_LINE> <INDENT> return getpass.getpass(text)
Prompt the user for a password and return it as a string.
625941b63317a56b86939a75
def present(name, user='root', minute='*', hour='*', daymonth='*', month='*', dayweek='*', comment=None, identifier=None): <NEW_LINE> <INDENT> name = ' '.join(name.strip().split()) <NEW_LINE> if not identifier: <NEW_LINE> <INDENT> identifier = SALT_CRON_NO_IDENTIFIER <NEW_LINE> <DEDENT> ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} <NEW_LINE> if __opts__['test']: <NEW_LINE> <INDENT> status = _check_cron(user, cmd=name, minute=minute, hour=hour, daymonth=daymonth, month=month, dayweek=dayweek, comment=comment, identifier=identifier) <NEW_LINE> ret['result'] = None <NEW_LINE> if status == 'absent': <NEW_LINE> <INDENT> ret['comment'] = 'Cron {0} is set to be added'.format(name) <NEW_LINE> <DEDENT> elif status == 'present': <NEW_LINE> <INDENT> ret['result'] = True <NEW_LINE> ret['comment'] = 'Cron {0} already present'.format(name) <NEW_LINE> <DEDENT> elif status == 'update': <NEW_LINE> <INDENT> ret['comment'] = 'Cron {0} is set to be updated'.format(name) <NEW_LINE> <DEDENT> return ret <NEW_LINE> <DEDENT> data = __salt__['cron.set_job'](user=user, minute=minute, hour=hour, daymonth=daymonth, month=month, dayweek=dayweek, cmd=name, comment=comment, identifier=identifier) <NEW_LINE> if data == 'present': <NEW_LINE> <INDENT> ret['comment'] = 'Cron {0} already present'.format(name) <NEW_LINE> return ret <NEW_LINE> <DEDENT> if data == 'new': <NEW_LINE> <INDENT> ret['comment'] = 'Cron {0} added to {1}\'s crontab'.format(name, user) <NEW_LINE> ret['changes'] = {user: name} <NEW_LINE> return ret <NEW_LINE> <DEDENT> if data == 'updated': <NEW_LINE> <INDENT> ret['comment'] = 'Cron {0} updated'.format(name, user) <NEW_LINE> ret['changes'] = {user: name} <NEW_LINE> return ret <NEW_LINE> <DEDENT> ret['comment'] = ('Cron {0} for user {1} failed to commit with error \n{2}' .format(name, user, data)) <NEW_LINE> ret['result'] = False <NEW_LINE> return ret
Verifies that the specified cron job is present for the specified user. For more advanced information about what exactly can be set in the cron timing parameters, check your cron system's documentation. Most Unix-like systems' cron documentation can be found via the crontab man page: ``man 5 crontab``. name The command that should be executed by the cron job. user The name of the user whose crontab needs to be modified, defaults to the root user minute The information to be set into the minute section, this can be any string supported by your cron system's the minute field. Default is ``*`` hour The information to be set in the hour section. Default is ``*`` daymonth The information to be set in the day of month section. Default is ``*`` month The information to be set in the month section. Default is ``*`` dayweek The information to be set in the day of week section. Default is ``*`` comment User comment to be added on line previous the cron job identifier Custom-defined identifier for tracking the cron line for future crontab edits. This defaults to the state id
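For illustration, a hedged sketch of the ret dict this state returns when a brand-new job is added (the command and user are made-up values, not from the source):

ret = {
    'name': 'date > /tmp/crontest',          # hypothetical command
    'changes': {'root': 'date > /tmp/crontest'},
    'comment': "Cron date > /tmp/crontest added to root's crontab",
    'result': True,
}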
625941b68e71fb1e9831d5b8
def _fill_var_subdirectories(self): <NEW_LINE> <INDENT> mkdirChain(self._sysroot + '/var/lib') <NEW_LINE> self._create_tmpfiles('/var/home') <NEW_LINE> self._create_tmpfiles('/var/roothome') <NEW_LINE> self._create_tmpfiles('/var/lib/rpm') <NEW_LINE> self._create_tmpfiles('/var/opt') <NEW_LINE> self._create_tmpfiles('/var/srv') <NEW_LINE> self._create_tmpfiles('/var/usrlocal') <NEW_LINE> self._create_tmpfiles('/var/mnt') <NEW_LINE> self._create_tmpfiles('/var/media') <NEW_LINE> self._create_tmpfiles('/var/spool') <NEW_LINE> self._create_tmpfiles('/var/spool/mail')
Add subdirectories to /var Once we have /var, start filling in any directories that may be required later there. We explicitly make /var/lib, since systemd-tmpfiles doesn't have a --prefix-only=/var/lib. We rely on 80-setfilecons.ks to set the label correctly. Next, run tmpfiles to make subdirectories of /var. We need this for both mounts like /home (really /var/home) and %post scripts might want to write to e.g. `/srv`, `/root`, `/usr/local`, etc. The /var/lib/rpm symlink is also critical for having e.g. `rpm -qa` work in %post. We don't iterate *all* tmpfiles because we don't have the matching NSS configuration inside Anaconda, and we can't "chroot" to get it because that would require mounting the API filesystems in the target.
625941b6a05bb46b383ec638
def on_post_build(self, config): <NEW_LINE> <INDENT> print("INFO - Writing source zip file to {}".format(self._source_zip_file_path)) <NEW_LINE> os.makedirs(os.path.dirname(self._source_zip_file_path), exist_ok=True) <NEW_LINE> download_dir = TemporaryDirectory() <NEW_LINE> with ZipFile(self._source_zip_file_path, "w") as source_zip: <NEW_LINE> <INDENT> for source_file in self._source_files.keys(): <NEW_LINE> <INDENT> file_location = self._source_files[source_file] <NEW_LINE> if "://" in self._source_files[source_file]: <NEW_LINE> <INDENT> self._print_message(config, "INFO - Downloading {}".format(file_location)) <NEW_LINE> try: <NEW_LINE> <INDENT> file_location = self._download_image(download_dir.name, file_location) <NEW_LINE> <DEDENT> except (urllib.request.HTTPError, urllib.error.URLError) as error: <NEW_LINE> <INDENT> self._print_message(config, "ERROR - Failed to download {} - {}".format(file_location, error)) <NEW_LINE> file_location = None <NEW_LINE> <DEDENT> <DEDENT> if file_location is not None: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> source_zip.write(file_location, source_file) <NEW_LINE> <DEDENT> except FileNotFoundError: <NEW_LINE> <INDENT> self._print_message(config, "WARNING - File not found when writing to zip - {}".format(file_location))
Create the zip file. Download any external images (if required).
625941b69c8ee82313fbb580
def SCardListReaders(self, hContext, ReaderGroup): <NEW_LINE> <INDENT> if hContext not in self.contexts: <NEW_LINE> <INDENT> return scard.SCARD_E_INVALID_HANDLE, [] <NEW_LINE> <DEDENT> return scard.SCARD_S_SUCCESS, [self.reader.name]
Return success and the name of our only reader.
625941b63346ee7daa2b2b74
def get_parts_completed(self, experiment_session): <NEW_LINE> <INDENT> return self.filter(experiment_session=experiment_session).exclude(date_completed__isnull=True).order_by('part')
Return the completed parts in the experiment_session: all parts listed, excluding those whose date_completed is null, ordered by part.
625941b660cbc95b062c6354
def find_by_name(self, name): <NEW_LINE> <INDENT> name = name.encode('ascii') <NEW_LINE> index = ctypes.c_int() <NEW_LINE> check(gp.gp_list_find_by_name(self._ptr, PTR(index), name)) <NEW_LINE> return str(index.value, encoding='ascii')
:param name: str :rtype: str
625941b6b830903b967e9723
def collate_fn(batch): <NEW_LINE> <INDENT> all_input_ids, all_input_mask, all_segment_ids, all_label_ids,all_input_lens = map(torch.stack, zip(*batch)) <NEW_LINE> max_len = max(all_input_lens).item() <NEW_LINE> all_input_ids = all_input_ids[:, :max_len] <NEW_LINE> all_input_mask = all_input_mask[:, :max_len] <NEW_LINE> all_segment_ids = all_segment_ids[:, :max_len] <NEW_LINE> return all_input_ids, all_input_mask, all_segment_ids, all_label_ids
batch is a list of (input_ids, input_mask, segment_ids, label_ids, input_len) tuples. Stacks each field into a batch tensor and trims the padded id, mask and segment tensors to the length of the longest real sequence in the batch.
625941b629b78933be1e54c5
def maximalRectangle(self, matrix): <NEW_LINE> <INDENT> if(len(matrix))==0 or len(matrix[0])==0: <NEW_LINE> <INDENT> return 0 <NEW_LINE> <DEDENT> row_cnt=len(matrix) <NEW_LINE> col_cnt=len(matrix[0]) <NEW_LINE> area = [int(i) for i in matrix[0]] <NEW_LINE> max_area=0 <NEW_LINE> for i in range(0, row_cnt): <NEW_LINE> <INDENT> if i!=0: <NEW_LINE> <INDENT> for j in range(col_cnt): <NEW_LINE> <INDENT> if(int(matrix[i][j])!=0): <NEW_LINE> <INDENT> area[j]=area[j]+int(matrix[i][j]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> area[j]=0 <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> max_area=max(max_area, self.histogram_area_fn(area)) <NEW_LINE> <DEDENT> return max_area
:type matrix: List[List[str]] :rtype: int
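A standard worked example; it assumes the enclosing class (here called Solution, a hypothetical name) also defines the histogram_area_fn helper the method calls:

matrix = [["1", "0", "1", "0", "0"],
          ["1", "0", "1", "1", "1"],
          ["1", "1", "1", "1", "1"],
          ["1", "0", "0", "1", "0"]]
# Solution().maximalRectangle(matrix) -> 6, the 2x3 block of '1's in rows 2 and 3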
625941b68e05c05ec3eea17c
def convert_per_connection_data_to_rows( self, connection_row_indices, n_rows, data, max_n_synapses): <NEW_LINE> <INDENT> return [ data[connection_row_indices == i][:max_n_synapses].reshape(-1) for i in range(n_rows)]
Converts per-connection data generated from connections into row-based data to be returned from get_synaptic_data :param ~numpy.ndarray connection_row_indices: The index of the row that each item should go into :param int n_rows: The number of rows :param ~numpy.ndarray data: The non-row-based data :param int max_n_synapses: The maximum number of synapses to generate in each row :rtype: list(~numpy.ndarray)
625941b6091ae35668666d71
def test_gemm_non_contiguous(self): <NEW_LINE> <INDENT> aval = np.ones((6, 2)) <NEW_LINE> bval = np.ones((2, 7)) <NEW_LINE> cval = np.arange(7) + np.arange(0, .6, .1)[:, np.newaxis] <NEW_LINE> a = theano.shared(aval[:3], borrow=True) <NEW_LINE> b = theano.shared(bval[:, :5], borrow=True) <NEW_LINE> c = theano.shared(cval[:3, :5], borrow=True) <NEW_LINE> s = theano.tensor.scalar() <NEW_LINE> upd_c = s * c + theano.tensor.dot(a, b) <NEW_LINE> f = theano.function([s], [], updates={c: upd_c}) <NEW_LINE> f(0) <NEW_LINE> ref_output = np.ones((3, 5)) * 2 <NEW_LINE> unittest_tools.assert_allclose(c.get_value(), ref_output)
test_gemm_non_contiguous: Test if GEMM works well with non-contiguous matrices.
625941b6d7e4931a7ee9dd27
def insert(self, index, item): <NEW_LINE> <INDENT> super(ObservableList, self).insert(index, item) <NEW_LINE> length = len(self) <NEW_LINE> if index >= length: <NEW_LINE> <INDENT> index = length - 1 <NEW_LINE> <DEDENT> elif index < 0: <NEW_LINE> <INDENT> index += length - 1 <NEW_LINE> if index < 0: <NEW_LINE> <INDENT> index = 0 <NEW_LINE> <DEDENT> <DEDENT> self._notify_add_at(index)
See list.insert.
625941b691f36d47f21ac301
def translations(self, **kwargs): <NEW_LINE> <INDENT> path = self._get_id_path('translations') <NEW_LINE> response = self._GET(path, kwargs) <NEW_LINE> self._set_attrs_to_values(response) <NEW_LINE> return response
Get the list of translations that exist for a TV series. These translations cascade down to the episode level. Returns: A dict representation of the JSON returned from the API.
625941b650812a4eaa59c131
def readDict(s, start=0): <NEW_LINE> <INDENT> d = [] <NEW_LINE> keyLen, start = binaryToVal(s, 4, start) <NEW_LINE> keyStr = s[start:start+keyLen].decode('ascii').rstrip() <NEW_LINE> keys = keyStr.split(',') <NEW_LINE> start += keyLen <NEW_LINE> valBytes, start = binaryToVal(s, 1, start) <NEW_LINE> for k in keys: <NEW_LINE> <INDENT> v, start = binaryToVal(s, valBytes, start) <NEW_LINE> d.append((k,v)) <NEW_LINE> <DEDENT> return d
Generic method to read a dictionary with string keys and integer values in the format written by writeDict()
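The matching writer is not part of this record; here is a hedged sketch of what writeDict() plausibly does, inferred purely from the reads above (big-endian byte order and the exact layout are assumptions):

import struct

def writeDict(d, valBytes=4):
    # 4-byte key-string length, ASCII comma-separated keys,
    # 1-byte value width, then one fixed-width integer per key
    keyStr = ','.join(k for k, _ in d).encode('ascii')
    out = struct.pack('>I', len(keyStr)) + keyStr + struct.pack('B', valBytes)
    for _, v in d:
        out += v.to_bytes(valBytes, 'big')
    return out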
625941b60c0af96317bb7ff5
def get_session(host, key, secret, secure=True, port=None, read_version='v2'): <NEW_LINE> <INDENT> endpoint = HTTPEndpoint(host, key, secret, secure, port) <NEW_LINE> return Client(endpoint, read_version=read_version)
Get a :class:`tempoiq.client.Client` instance with the given session information. :param String host: Backend's base URL, in the form "your-host.backend.tempoiq.com". For legacy reasons, it is also possible to prepend the URL schema, but this will be deprecated in the future. :param String key: API key :param String secret: API secret :rtype: :class:`tempoiq.client.Client`
625941b6e8904600ed9f1d34
def setUp(self): <NEW_LINE> <INDENT> self.app = create_app() <NEW_LINE> self.client = self.app.test_client <NEW_LINE> self.database_name = "bookshelf_test" <NEW_LINE> self.database_path = "postgres://{}:{}@{}/{}".format('student', 'student','localhost:5432', self.database_name) <NEW_LINE> setup_db(self.app, self.database_path) <NEW_LINE> self.new_book = { 'title': 'Anansi Boys', 'author': 'Neil Gaiman', 'rating': 5 } <NEW_LINE> with self.app.app_context(): <NEW_LINE> <INDENT> self.db = SQLAlchemy() <NEW_LINE> self.db.init_app(self.app) <NEW_LINE> self.db.create_all()
Define test variables and initialize app.
625941b65fc7496912cc3791
def set_logger(options, logname='fms'): <NEW_LINE> <INDENT> if isinstance(options, str): <NEW_LINE> <INDENT> loglevel = options <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> loglevel = 'error' <NEW_LINE> if options.verbose: <NEW_LINE> <INDENT> loglevel = 'info' <NEW_LINE> <DEDENT> if options.loglevel: <NEW_LINE> <INDENT> loglevel = options.loglevel <NEW_LINE> <DEDENT> <DEDENT> levels = {'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING, 'error': logging.ERROR, 'critical': logging.CRITICAL,} <NEW_LINE> logger = logging.getLogger(logname) <NEW_LINE> lhandler = logging.StreamHandler() <NEW_LINE> formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s") <NEW_LINE> lhandler.setFormatter(formatter) <NEW_LINE> logger.addHandler(lhandler) <NEW_LINE> logger.setLevel(levels[loglevel]) <NEW_LINE> return logger
Sets main logger instance.
625941b6ec188e330fd5a5b3
def p_arrsizes(p): <NEW_LINE> <INDENT> if len(p) == 3: <NEW_LINE> <INDENT> p[0] = [(p[1], p.lineno(1))] + p[2] <NEW_LINE> <DEDENT> elif p[1] == None: <NEW_LINE> <INDENT> p[0] = [] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> p[0] = [(p[1], p.lineno(1))]
arrsizes : EXPR_IDX arrsizes | EXPR_IDX | empty
625941b6dc8b845886cb5340
def read_nuclear_powerplant(powerplant_filepath: str, carbon_emission_filepath: str, population_filepath: str) -> List[List]: <NEW_LINE> <INDENT> country = common_country(powerplant_filepath, carbon_emission_filepath, population_filepath) <NEW_LINE> nuclear = [[], []] <NEW_LINE> powerplant = read_powerplant_data(powerplant_filepath) <NEW_LINE> data_so_far = {'country': powerplant['country'], 'type': powerplant['type']} <NEW_LINE> for x in range(0, len(data_so_far['country'])): <NEW_LINE> <INDENT> if data_so_far['type'][x] == 'Nuclear': <NEW_LINE> <INDENT> if data_so_far['country'][x] in nuclear[0]: <NEW_LINE> <INDENT> nuclear[1][-1] = nuclear[1][-1] + 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> list.append(nuclear[0], data_so_far['country'][x]) <NEW_LINE> list.append(nuclear[1], 1) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> nuclear_power = [[], []] <NEW_LINE> for x in range(0, len(nuclear[0])): <NEW_LINE> <INDENT> if nuclear[0][x] in country: <NEW_LINE> <INDENT> nuclear_power[0].append(nuclear[0][x]) <NEW_LINE> nuclear_power[1].append(nuclear[1][x]) <NEW_LINE> <DEDENT> <DEDENT> population = read_pop_data(population_filepath) <NEW_LINE> pop = [[], []] <NEW_LINE> for x in range(0, len(population['country'])): <NEW_LINE> <INDENT> if population['country'][x] in nuclear[0]: <NEW_LINE> <INDENT> pop[0].append(population['country'][x]) <NEW_LINE> pop[1].append(population['population'][x]) <NEW_LINE> <DEDENT> <DEDENT> actual_data = [[], []] <NEW_LINE> for x in range(0, len(nuclear_power[0])): <NEW_LINE> <INDENT> list.append(actual_data[0], pop[0][x]) <NEW_LINE> list.append(actual_data[1], nuclear_power[1][x] / pop[1][x]) <NEW_LINE> <DEDENT> return actual_data
Return the country name and the number of nuclear powerplants per capita in each country. If a country has no nuclear powerplant, it is not included in the output. Preconditions: - powerplant_filepath refers to the powerplant csv file - carbon_emission_filepath refers to the carbon emission csv file - population_filepath refers to the population csv file >>> nuclear = read_nuclear_powerplant('global_power_plant_database.csv', 'owid-co2-data.csv', 'countries of the world.csv') >>> len(nuclear[0]) == 30 True
625941b67c178a314d6ef263
def __init__(self, channel): <NEW_LINE> <INDENT> self.GetImageStream = channel.unary_stream( '/ImageService/GetImageStream', request_serializer=eval__server__pb2.String.SerializeToString, response_deserializer=eval__server__pb2.Frame.FromString, ) <NEW_LINE> self.GetShm = channel.unary_unary( '/ImageService/GetShm', request_serializer=eval__server__pb2.Empty.SerializeToString, response_deserializer=eval__server__pb2.String.FromString, )
Constructor. Args: channel: A grpc.Channel.
625941b6d164cc6175782b59
def get_username_from_update(update): <NEW_LINE> <INDENT> if update.message.from_user.username is not None: <NEW_LINE> <INDENT> username = update.message.from_user.username <NEW_LINE> <DEDENT> elif update.message.from_user.first_name is not None: <NEW_LINE> <INDENT> username = update.message.from_user.first_name <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> username = None <NEW_LINE> <DEDENT> return username
Return the user's username if set, otherwise their first name, or None if neither is available.
625941b6090684286d50eaeb
def document_topics(self): <NEW_LINE> <INDENT> return normalize((self.f['p_d_z'][:] * self.f['p_z'][np.newaxis,:]).T, axis=0)
Compute the probabilities of documents belonging to topics. Return: a Z x D matrix of P(z|d) probabilities. Note: This can be seen as a dimensionality reduction since a Z x D matrix is obtained from a V x D matrix, where Z << V.
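The line above is a Bayes step: P(z|d) is proportional to P(d|z) P(z), normalized over topics for each document. A toy sketch with explicit L1 column normalization (the record's normalize helper and its norm convention are assumptions):

import numpy as np

p_d_z = np.array([[0.2, 0.7],    # D x Z: P(d|z)
                  [0.8, 0.3]])
p_z = np.array([0.5, 0.5])       # Z: P(z)
joint = (p_d_z * p_z[np.newaxis, :]).T   # Z x D
p_z_given_d = joint / joint.sum(axis=0)  # each column now sums to 1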
625941b6e64d504609d7464c
def open_nodzgraph(self): <NEW_LINE> <INDENT> pass
Placeholder to patch in the Nodzgraph class. Returns: None (not implemented yet).
625941b68da39b475bd64d82
def check_condtions(self): <NEW_LINE> <INDENT> pass
Function responsible for determining whether or not another state is required. It is called every time the entity is processed. By default this function does nothing; subclasses define their own functionality.
625941b6d18da76e235322dd
def test_allowed_file(self): <NEW_LINE> <INDENT> good_names = ['1.jpg', '2.JPG', '3.png', '4.PNG', '5.jpeg', '6.JPEG'] <NEW_LINE> bad_names = ['1', '2.jpg.2', '3.txt', ''] <NEW_LINE> for name in good_names: <NEW_LINE> <INDENT> self.assertTrue(_allowed_file(name)) <NEW_LINE> <DEDENT> for name in bad_names: <NEW_LINE> <INDENT> self.assertFalse(_allowed_file(name))
Tests that only certain image file types are allowed.
625941b6d8ef3951e3243349
def _create_invoice_tax(self, invoice_vat_book_line_id, invoice_id, tax_code_ids): <NEW_LINE> <INDENT> invoice_tax_lines_obj = self.env['l10n.es.vat.book.invoice.tax.lines'] <NEW_LINE> for invoice_tax_line in invoice_id.tax_line: <NEW_LINE> <INDENT> tax_code = invoice_tax_line.mapped('base_code_id.id') <NEW_LINE> if tax_code != [] and set(tax_code) < set(tax_code_ids.ids): <NEW_LINE> <INDENT> vals = self._vals_invoice_tax(invoice_tax_line) <NEW_LINE> if invoice_id.type == 'out_invoice': <NEW_LINE> <INDENT> vals.update({ 'issued_invoice_line_id': invoice_vat_book_line_id.id }) <NEW_LINE> invoice_tax_lines_obj.create(vals) <NEW_LINE> <DEDENT> elif invoice_id.type == 'in_invoice': <NEW_LINE> <INDENT> vals.update({ 'received_invoice_line_id': invoice_vat_book_line_id.id }) <NEW_LINE> invoice_tax_lines_obj.create(vals) <NEW_LINE> <DEDENT> elif invoice_id.type == 'out_refund': <NEW_LINE> <INDENT> vals.update({ 'rectification_issued_invoice_line_id': invoice_vat_book_line_id.id }) <NEW_LINE> invoice_tax_lines_obj.create(vals) <NEW_LINE> <DEDENT> elif invoice_id.type == 'in_refund': <NEW_LINE> <INDENT> vals.update({ 'rectification_received_invoice_line_id': invoice_vat_book_line_id.id }) <NEW_LINE> invoice_tax_lines_obj.create(vals) <NEW_LINE> <DEDENT> self._invoices_summary(invoice_tax_line, invoice_id.type)
This function creates a l10n.es.vat.book.invoice.tax.lines record for the current Issued, Received, or Rectification invoice. Args: invoice_vat_book_line_id (obj): l10n.es.vat.book.issued.lines, l10n.es.vat.book.received.lines or l10n.es.vat.book.rectification.lines invoice_id (obj): Invoice tax_code_ids (obj): account.tax.code records from the vat book Returns: bool: True if successful, False otherwise.
625941b6adb09d7d5db6c59f
def test_search_related_report_event_entities(self): <NEW_LINE> <INDENT> pass
Test case for search_related_report_event_entities: list the related events over a firing event.
625941b6656771135c3eb67e
def expirememberat(self, key, subkey, timestamp): <NEW_LINE> <INDENT> return self.execute_command('EXPIREMEMBERAT', key, subkey, timestamp)
Set a timeout on a subkey by absolute timestamp instead of seconds. https://docs.keydb.dev/docs/commands/#expirememberat :param key: name of the key :param subkey: subkey (hash field) to expire :param timestamp: absolute Unix timestamp at which the subkey expires :return: the command's reply
625941b68c0ade5d55d3e7cb
def initLeo (self): <NEW_LINE> <INDENT> trace = False <NEW_LINE> if not self.isValidPython(): return <NEW_LINE> try: <NEW_LINE> <INDENT> import leo.core.leoGlobals as leoGlobals <NEW_LINE> <DEDENT> except ImportError: <NEW_LINE> <INDENT> print("Error importing leoGlobals.py") <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> import leo.core.leoApp as leoApp <NEW_LINE> leoGlobals.app = leoApp.LeoApp() <NEW_LINE> <DEDENT> except ImportError: <NEW_LINE> <INDENT> print("Error importing leoApp.py") <NEW_LINE> <DEDENT> self.g = g = leoGlobals <NEW_LINE> assert(g.app) <NEW_LINE> g.app.leoID = None <NEW_LINE> g.app.silentMode = self.silent <NEW_LINE> if trace: <NEW_LINE> <INDENT> import sys <NEW_LINE> g.trace(sys.argv) <NEW_LINE> g.trace('g.app.silentMode',g.app.silentMode) <NEW_LINE> <DEDENT> import leo.core.leoPlugins as leoPlugins <NEW_LINE> leoPlugins.init() <NEW_LINE> try: <NEW_LINE> <INDENT> import leo.core.leoNodes as leoNodes <NEW_LINE> <DEDENT> except ImportError: <NEW_LINE> <INDENT> print("Error importing leoNodes.py") <NEW_LINE> import traceback ; traceback.print_exc() <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> import leo.core.leoConfig as leoConfig <NEW_LINE> <DEDENT> except ImportError: <NEW_LINE> <INDENT> print("Error importing leoConfig.py") <NEW_LINE> import traceback ; traceback.print_exc() <NEW_LINE> <DEDENT> leoGlobals.g = leoGlobals <NEW_LINE> g.app.recentFilesManager = leoApp.RecentFilesManager() <NEW_LINE> g.app.loadManager = lm = leoApp.LoadManager() <NEW_LINE> g.app.loadManager.computeStandardDirectories() <NEW_LINE> if not self.getLeoID(): return <NEW_LINE> g.app.inBridge = True <NEW_LINE> g.app.nodeIndices = leoNodes.nodeIndices(g.app.leoID) <NEW_LINE> g.app.config = leoConfig.GlobalConfigManager() <NEW_LINE> if self.readSettings: <NEW_LINE> <INDENT> lm.readGlobalSettingsFiles() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> settings_d,shortcuts_d = lm.createDefaultSettingsDicts() <NEW_LINE> lm.globalSettingsDict = settings_d <NEW_LINE> lm.globalShortcutsDict = shortcuts_d <NEW_LINE> <DEDENT> self.createGui() <NEW_LINE> if self.verbose: self.reportDirectories() <NEW_LINE> self.adjustSysPath() <NEW_LINE> if not self.loadPlugins: <NEW_LINE> <INDENT> def dummyDoHook(tag,*args,**keys): <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> g.doHook = dummyDoHook <NEW_LINE> <DEDENT> g.doHook("start1") <NEW_LINE> g.app.computeSignon() <NEW_LINE> g.app.initing = False <NEW_LINE> g.doHook("start2",c=None,p=None,v=None,fileName=None)
Init the Leo app to which this class gives access. This code is based on leo.run().
625941b663d6d428bbe442fb
def orderlyQueue(s: str, k: int) -> str: <NEW_LINE> <INDENT> if k == 1: <NEW_LINE> <INDENT> return min(s[i:] + s[:i] for i in range(len(s))) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return "".join(sorted(s))
When k == 1 only rotation is possible. When k > 1 any permutation is possible. Time: O(n * n) Space: O(n)
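Two checks that follow directly from the rule stated above:

assert orderlyQueue("cba", 1) == "acb"      # k == 1: the best of the three rotations
assert orderlyQueue("baaca", 3) == "aaabc"  # k > 1: any permutation reachable, so sort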
625941b6627d3e7fe0d68c5a
def cosine_similarity(X, Y): <NEW_LINE> <INDENT> return (X @ Y.T) / np.sqrt(np.nansum(np.power(X, 2), axis=1)[:, np.newaxis] * np.nansum(np.power(Y, 2), axis=1)[np.newaxis, :])
Compute the cosine similarities between the row vectors of matrices X and Y all at once, and output the resulting matrix of similarities.
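A small shape check, assuming the broadcasting fix applied in the code above:

import numpy as np

X = np.array([[1.0, 0.0],
              [0.0, 1.0]])
Y = np.array([[1.0, 0.0],
              [1.0, 1.0],
              [0.0, 2.0]])
S = cosine_similarity(X, Y)   # shape (2, 3); S[i, j] compares X[i] with Y[j]
# S[0] == [1.0, 1/sqrt(2), 0.0]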
625941b65fdd1c0f98dc003d
def load(path): <NEW_LINE> <INDENT> file_paths = _scan_up_all(path, _DEFAULT_NAME) <NEW_LINE> file_paths.append(os.path.expanduser('~/%s' % _DEFAULT_NAME)) <NEW_LINE> config = ConfigParser.SafeConfigParser() <NEW_LINE> config.read(file_paths) <NEW_LINE> return config
Loads all config files, including those up the directory path and in the user profile path. Args: path: Path to search for the config file. Returns: An initialized Config object or None if no config was found.
625941b6925a0f43d2549c7f
def check_validity_and_test(args): <NEW_LINE> <INDENT> expr, expected = args <NEW_LINE> local_res = is_valid(expr) <NEW_LINE> return local_res == expected
Check an expression and compare the outcome against a known expected value.
625941b6091ae35668666d72
def create_sqs_no_messages_alarm(): <NEW_LINE> <INDENT> cloudwatch_client = boto3.client("cloudwatch") <NEW_LINE> cloudwatch_client.put_metric_alarm( AlarmName=current_app.config["HEARTBEAT_ALARM_NAME"], AlarmDescription="therm heartbeat stopped!", ActionsEnabled=True, OKActions=[_arn("sns", current_app.config["SNS_TOPIC_NAME"])], AlarmActions=[_arn("sns", current_app.config["SNS_TOPIC_NAME"])], InsufficientDataActions=[_arn("sns", current_app.config["SNS_TOPIC_NAME"])], TreatMissingData="breaching", MetricName="NumberOfMessagesSent", Namespace="AWS/SQS", Statistic="Sum", Dimensions=[{"Name": "QueueName", "Value": current_app.config["SQS_QUEUE_NAME"]}], Period=current_app.config["ALARM_PERIOD"], Unit="Seconds", EvaluationPeriods=1, Threshold=1, ComparisonOperator="LessThanOrEqualToThreshold", )
Alarm when no SQS messages received by queue for one ALARM_PERIOD
625941b6e5267d203edcdaad
def execute_th(self, inputs): <NEW_LINE> <INDENT> zero_entity = Entity(name="0", is_constant=True) <NEW_LINE> two_entity = Entity(name="2", is_constant=True) <NEW_LINE> assumptions = [standard_logic_functions["BiggerOrEqual"].execute_lf([inputs[0], zero_entity]), standard_logic_functions["BiggerOrEqual"].execute_lf([inputs[1], zero_entity])] <NEW_LINE> a_and_b = standard_numerical_functions["add"].execute_nf([inputs[0], inputs[1]]) <NEW_LINE> a_time_b = standard_numerical_functions["mul"].execute_nf([inputs[0], inputs[1]]) <NEW_LINE> sqrt_ab = standard_numerical_functions["sqrt"].execute_nf([a_time_b]) <NEW_LINE> two_sqrt_ab = standard_numerical_functions["mul"].execute_nf([two_entity, sqrt_ab]) <NEW_LINE> conclusions = [standard_logic_functions["BiggerOrEqual"].execute_lf([a_and_b, two_sqrt_ab])] <NEW_LINE> extra_entities = [a_and_b, a_time_b, sqrt_ab, two_sqrt_ab] <NEW_LINE> return {"Assumptions": assumptions, "Conclusions": conclusions, "ExtraEntities": extra_entities}
If a, b >= 0, then a + b >= 2 * sqrt(ab) :param inputs: 2 inputs, [a, b] :return: dict(Assumptions, Conclusions and ExtraEntities)
625941b64d74a7450ccd3fcf
@pytest.yield_fixture(scope='function') <NEW_LINE> def db(app: Flask) -> Generator[SQLAlchemy, None, None]: <NEW_LINE> <INDENT> _db.app = app <NEW_LINE> yield _db
Flask-SQLAlchemy Fixture
625941b66fece00bbac2d547
def _expand_wiki_links(self, context, out_format, content): <NEW_LINE> <INDENT> def expand(match): <NEW_LINE> <INDENT> wiki_text = match.groups()[0] <NEW_LINE> link = extract_link(self.env, context, wiki_text) <NEW_LINE> if isinstance(link, Element): <NEW_LINE> <INDENT> href = link.attrib.get('href') <NEW_LINE> name = link.children <NEW_LINE> description = link.attrib.get('title', '') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> href = wiki_text <NEW_LINE> description = None <NEW_LINE> <DEDENT> if out_format == 'svg': <NEW_LINE> <INDENT> format = 'URL="javascript:window.parent.location.href=\'%s\'"' <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> format = 'URL="%s"' <NEW_LINE> <DEDENT> url = format % href <NEW_LINE> if description: <NEW_LINE> <INDENT> url += '\ntooltip="%s"' % description .replace('"', '').replace('\n', '') <NEW_LINE> <DEDENT> return url <NEW_LINE> <DEDENT> return re.sub(r'URL="(.*?)"', expand, content)
Expand TracLinks that follow all URL= patterns.
625941b650485f2cf553cba5
def prep_msg(self, msg): <NEW_LINE> <INDENT> self.msg_image = self.font.render(msg, True, self.text_color, self.button_color) <NEW_LINE> self.msg_image_rect = self.msg_image.get_rect() <NEW_LINE> self.msg_image_rect.center = self.rect.center
Render msg as an image and center it on the button.
625941b6293b9510aa2c30a5
def handle_pic(self): <NEW_LINE> <INDENT> img_partial1 = self.img[500:1500].astype(int) <NEW_LINE> left_up_color = img_partial1[0, 0, :] <NEW_LINE> person_bottom_diff = pd.DataFrame(np.sqrt(np.square(img_partial1 - self.person_bottom_color).sum(axis=2))) <NEW_LINE> person_bottom_y = person_bottom_diff.where(person_bottom_diff < 10, np.nan).dropna(axis=0, how='all').index[-1] <NEW_LINE> person_bottom_x = np.array(person_bottom_diff.where(person_bottom_diff < 10, np.nan).dropna(axis=1, how='all').columns).mean().astype(int) <NEW_LINE> person_head_diff = pd.DataFrame(np.sqrt(np.square(img_partial1 - self.person_head_color).sum(axis=2))) <NEW_LINE> person_head_y = person_head_diff.where(person_head_diff < 10, np.nan).dropna(axis=0, how='all').index[0] <NEW_LINE> person_head_x = np.array(person_head_diff.where(person_head_diff < 10, np.nan).dropna(axis=1, how='all').columns).mean().astype(int) <NEW_LINE> person_reset = img_partial1[person_head_y - 5:person_bottom_y, person_head_x - 33:person_bottom_x + 33, :] <NEW_LINE> person_reset[:, :, :] = left_up_color <NEW_LINE> img_partial2 = img_partial1[:person_bottom_y, :] <NEW_LINE> dest_color = pd.DataFrame(np.sqrt(np.square(img_partial2 - left_up_color).sum(axis=2))) <NEW_LINE> dest_color2 = dest_color.where(dest_color > 25, np.nan).dropna(axis=0, how='all') <NEW_LINE> top_y = dest_color2.index[0] <NEW_LINE> top_x = np.array(dest_color2.iloc[0].dropna().index).mean().astype(int) <NEW_LINE> top_color = img_partial2[top_y, top_x] <NEW_LINE> top_diff = pd.DataFrame(np.sqrt(np.square(img_partial2 - top_color).sum(axis=2))) <NEW_LINE> top_diff2 = top_diff.where(top_diff < 5, np.nan) <NEW_LINE> if top_x > person_bottom_x: <NEW_LINE> <INDENT> hor_y = top_diff2.dropna(axis=1, how='all').iloc[:, -1].dropna().index[0] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> hor_y = top_diff2.dropna(axis=1, how='all').iloc[:, 0].dropna().index[0] <NEW_LINE> <DEDENT> dest_point_x, dest_point_y = top_x, hor_y <NEW_LINE> self.distance = np.sqrt(np.square(np.array([person_bottom_x, person_bottom_y]) - np.array([dest_point_x, dest_point_y])).sum())
Process the screenshot: find the coordinates of the game piece and the target point, then compute the distance between them.
625941b671ff763f4b54949a
def test_cycle_length_5(self): <NEW_LINE> <INDENT> value = cycle_length(30011) <NEW_LINE> self.assertEqual(value, 91)
Calculate the cycle length of an arbitrary number.
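The function under test is not shown in this record; here is a hedged sketch of the usual Collatz cycle-length implementation the assertion is consistent with:

def cycle_length(n):
    # count n itself and every value down to and including 1
    count = 1
    while n != 1:
        n = 3 * n + 1 if n % 2 else n // 2
        count += 1
    return count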
625941b69c8ee82313fbb581
def field(self, key): <NEW_LINE> <INDENT> col_indx = _get_index(self.columns.names, key) <NEW_LINE> if self.columns[col_indx]._phantom: <NEW_LINE> <INDENT> warnings.warn( 'Field %r has a repeat count of 0 in its format code, ' 'indicating an empty field.' % key) <NEW_LINE> recformat = self.columns._recformats[col_indx].lstrip('0') <NEW_LINE> return np.array([], dtype=recformat) <NEW_LINE> <DEDENT> n_phantom = len([c for c in self.columns[:col_indx] if c._phantom]) <NEW_LINE> field_indx = col_indx - n_phantom <NEW_LINE> recformat = self._coldefs._recformats[col_indx] <NEW_LINE> base = self <NEW_LINE> while (isinstance(base, FITS_rec) and isinstance(base.base, np.recarray)): <NEW_LINE> <INDENT> base = base.base <NEW_LINE> <DEDENT> field = np.recarray.field(base, field_indx) <NEW_LINE> if self._convert[field_indx] is None: <NEW_LINE> <INDENT> if isinstance(recformat, _FormatP): <NEW_LINE> <INDENT> converted = self._convert_p(col_indx, field, recformat) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> converted = self._convert_other(col_indx, field, recformat) <NEW_LINE> <DEDENT> self._convert[field_indx] = converted <NEW_LINE> return converted <NEW_LINE> <DEDENT> return self._convert[field_indx]
A view of a `Column`'s data as an array.
625941b6046cf37aa974cb57
def test_initialization_required_arguments(self): <NEW_LINE> <INDENT> token = 'GitHub-API-Token' <NEW_LINE> repoOwner = Interpolate('owner') <NEW_LINE> repoName = Interpolate('name') <NEW_LINE> status = GitHubStatus( token=token, repoOwner=repoOwner, repoName=repoName) <NEW_LINE> self.assertEqual(repoOwner, status._repoOwner) <NEW_LINE> self.assertEqual(repoName, status._repoName) <NEW_LINE> self.assertEqual(status._sha, Interpolate("%(src::revision)s")) <NEW_LINE> self.assertEqual(status._startDescription, "Build started.") <NEW_LINE> self.assertEqual(status._endDescription, "Build done.")
Status can be initialized by only specifying GitHub API token and interpolation for repository's owner and name. All other arguments are initialized with default values.
625941b6796e427e537b03ce
def update_isvalid_column_in_DB(session): <NEW_LINE> <INDENT> print("INFO: Updating isvalid column in DB...") <NEW_LINE> all_adverts = session.query(CarAdverts).all() <NEW_LINE> for advert in all_adverts: <NEW_LINE> <INDENT> session.query(CarAdverts).filter(CarAdverts.id == advert.id).update({'isvalid': False}) <NEW_LINE> session.commit()
Mark every car advert in the DB as not valid. :param session: SQLAlchemy session bound to the adverts DB :return: None
625941b655399d3f055884bf
def update_parameter_in_file(path, var_in, new_val, regex_in): <NEW_LINE> <INDENT> _loop_regexps = { 'bar-separated': (r'([a-z]+[\s\|]+)' r'(\w+)' r'(\s*[\|]+\s*)' r'([\w\s\.,;\[\]\-]+)' r'(\s*)'), 'space-separated': (r'(\s*)' r'(\w+)' r'(\s+)' r'([\w\s\.,;\[\]\-]+)' r'(\s*)'), } <NEW_LINE> isfound = False <NEW_LINE> if regex_in in _loop_regexps.keys(): <NEW_LINE> <INDENT> regex_in = _loop_regexps[regex_in] <NEW_LINE> <DEDENT> para_file_in = open(path, 'r') <NEW_LINE> para_file_out = open(path + '.tmp', 'w') <NEW_LINE> for line in para_file_in: <NEW_LINE> <INDENT> if not line.rstrip(): <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> (a, var, b, old_val, c) = re.match(regex_in, line.rstrip()).groups() <NEW_LINE> gc3libs.log.debug( "Read variable '%s' with value '%s' ...", var, old_val) <NEW_LINE> if var == var_in: <NEW_LINE> <INDENT> isfound = True <NEW_LINE> upd_val = new_val <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> upd_val = old_val <NEW_LINE> <DEDENT> para_file_out.write(a + var + b + upd_val + c + '\n') <NEW_LINE> <DEDENT> para_file_out.close() <NEW_LINE> para_file_in.close() <NEW_LINE> os.rename(path + '.tmp', path) <NEW_LINE> if not isfound: <NEW_LINE> <INDENT> gc3libs.log.critical( 'update_parameter_in_file could not find parameter' ' in specified file')
Updates a parameter value in a parameter file using predefined regular expressions in `_loop_regexps`. :param path: Full path to the parameter file. :param var_in: The variable to modify. :param new_val: The updated parameter value. :param regex_in: Name of the regular expression that describes the format of the parameter file.
625941b621bff66bcd684762
def mixnet_m(**kwargs): <NEW_LINE> <INDENT> return get_mixnet(version="m", width_scale=1.0, model_name="mixnet_m", **kwargs)
MixNet-M model from 'MixConv: Mixed Depthwise Convolutional Kernels,' https://arxiv.org/abs/1907.09595. Parameters: ---------- pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.tensorflow/models' Location for keeping the model parameters.
625941b61f5feb6acb0c4961
def __rdiv__( self, rhs ): <NEW_LINE> <INDENT> return Duration( self._frame, float( rhs ) / self._seconds )
Divide a Duration by a value. = INPUT VARIABLES - rhs The scalar to divide by. = RETURN VALUE - Returns the scaled Duration.
625941b64527f215b584c268
def GetOriginValue(self): <NEW_LINE> <INDENT> return self.originvalues
If the user selects wx.ID_CANCEL, return the original values.
625941b67cff6e4e81117792
def load_stream(path): <NEW_LINE> <INDENT> stream = read(path) <NEW_LINE> stream.merge() <NEW_LINE> return stream
Loads a Stream object from the file at path. Args: path: path to the input file, (for supported formats see, http://docs.obspy.org/tutorial/code_snippets/reading_seismograms.html) Returns: an obspy.core.Stream object (http://docs.obspy.org/packages/autogen/obspy.core.stream.Stream.html#obspy.core.stream.Stream)
625941b66e29344779a62422
def wait_for_sensor(self): <NEW_LINE> <INDENT> while not GPIO.input(BUSYPin): <NEW_LINE> <INDENT> pass
Blocks until sensor is ready
625941b63c8af77a43ae35aa
def generatePlayers(self, template): <NEW_LINE> <INDENT> raceObjectList = template.players_data <NEW_LINE> tmpPlayers = {} <NEW_LINE> n = 0 <NEW_LINE> for race in raceObjectList: <NEW_LINE> <INDENT> tmpKey = ("player%s" % str(n)) <NEW_LINE> player = Player(race, n, self.game_universe, self.technology) <NEW_LINE> if self.game_variables['DesignCapacity'] != player.designs.DesignCapacity: <NEW_LINE> <INDENT> designCapacity = self.game_variables['DesignCapacity'] <NEW_LINE> player.design.DesignCapacity = designCapacity <NEW_LINE> <DEDENT> startingShipDesigns = startingDesigns() <NEW_LINE> processDesign(startingShipDesigns, player, self.technology) <NEW_LINE> tmpPlayers[tmpKey] = player <NEW_LINE> n+=1 <NEW_LINE> <DEDENT> playNumb = 0 <NEW_LINE> for uniKey, universe in self.game_universe.items(): <NEW_LINE> <INDENT> for p in range(0, int(universe.Players)): <NEW_LINE> <INDENT> tmpKey = ("player%s" % str(playNumb)) <NEW_LINE> if tmpKey in tmpPlayers: <NEW_LINE> <INDENT> player = tmpPlayers[tmpKey] <NEW_LINE> planetHW = universe.createHomeworldPlanet(player.speciesData) <NEW_LINE> homeworld = Colony(player, planetHW, template.starting_population) <NEW_LINE> homeworld.scanner = True <NEW_LINE> player.colonies[planetHW.ID] = homeworld <NEW_LINE> <DEDENT> playNumb += 1 <NEW_LINE> <DEDENT> <DEDENT> return tmpPlayers
input: a list of speciesData objects (StandardGameTemplate grabs these from .r1 files or the development standard object). output: a dictionary of Player objects, keyed by "player" + (0 to N).
625941b667a9b606de4a7cc9
def shapiro_test(sample): <NEW_LINE> <INDENT> res = stats.shapiro(sample) <NEW_LINE> return ShapiroResult(*res)
Shapiro-Wilk test. Tests the hypothesis that the sample is normally distributed. Parameters ---------- sample : array_like Array of observations Returns ------- statistic : float or array The Shapiro-Wilk statistic pvalue : float or array two-tailed p-value
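Assumed usage (ShapiroResult is taken to be a namedtuple-style wrapper with the statistic and pvalue fields described above):

import numpy as np

rng = np.random.default_rng(0)
res = shapiro_test(rng.normal(size=200))
# a statistic near 1.0 with a large pvalue is consistent with normality
print(res.statistic, res.pvalue)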
625941b69b70327d1c4e0be0
def cvtMat3(npmat3): <NEW_LINE> <INDENT> return Mat3(npmat3[0, 0], npmat3[1, 0], npmat3[2, 0], npmat3[0, 1], npmat3[1, 1], npmat3[2, 1], npmat3[0, 2], npmat3[1, 2], npmat3[2, 2])
convert numpy.2darray to LMatrix3f defined in Panda3d :param npmat3: a 3x3 numpy ndarray :return: a LMatrix3f object, see panda3d author: weiwei date: 20161107, tsukuba
625941b6498bea3a759b98be
def test_unpack_ipv4(self): <NEW_LINE> <INDENT> field = OmniGenericIPAddressField._meta.get_field('unpack_ipv4') <NEW_LINE> self.assertIsInstance(field, models.BooleanField) <NEW_LINE> self.assertTrue(field.blank) <NEW_LINE> self.assertFalse(field.null) <NEW_LINE> self.assertFalse(field.default)
The model should have an unpack_ipv4 field
625941b624f1403a92600976
def median(timeseries, segmentlength, overlap, window=None, plan=None): <NEW_LINE> <INDENT> return lal_psd(timeseries, 'medianmean', segmentlength, overlap, window=window, plan=plan)
Calculate the power spectral density of the given `TimeSeries` using the median-mean average method. For more details see :lalsuite:`XLALREAL8AverageSpectrumMean`. Parameters ---------- timeseries : `TimeSeries` input `TimeSeries` data segmentlength : `int` number of samples in single average overlap : `int` number of samples between averages window : `~gwpy.window.Window`, optional window function to apply to timeseries prior to FFT plan : :lalsuite:`REAL8FFTPlan`, optional LAL FFT plan to use when generating average spectrum Returns ------- Spectrum median-mean-averaged `Spectrum`
625941b644b2445a33931ead
def parse(self, inputStream): <NEW_LINE> <INDENT> super( DetonationPdu, self).parse(inputStream) <NEW_LINE> self.explodingEntityID.parse(inputStream) <NEW_LINE> self.eventID.parse(inputStream) <NEW_LINE> self.velocity.parse(inputStream) <NEW_LINE> self.locationInWorldCoordinates.parse(inputStream) <NEW_LINE> self.descriptor.parse(inputStream) <NEW_LINE> self.locationOfEntityCoordinates.parse(inputStream) <NEW_LINE> self.detonationResult = inputStream.read_unsigned_byte(); <NEW_LINE> self.numberOfVariableParameters = inputStream.read_unsigned_byte(); <NEW_LINE> self.pad = inputStream.read_unsigned_short(); <NEW_LINE> for idx in range(0, self.numberOfVariableParameters): <NEW_LINE> <INDENT> element = null() <NEW_LINE> element.parse(inputStream) <NEW_LINE> self.variableParameters.append(element)
"Parse a message. This may recursively call embedded objects.
625941b68a43f66fc4b53e76
def update_odometer(self, mileage): <NEW_LINE> <INDENT> if mileage >= self.odometer_reading: <NEW_LINE> <INDENT> self.odometer_reading = mileage <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> print("You cannot roll back an odometer!")
Set the odometer reading to the given value. Reject the change if it attempts to roll the odometer back.
625941b616aa5153ce362285
@app.route('/bookbin/JSON') <NEW_LINE> def showBookbinsJSON(): <NEW_LINE> <INDENT> bookbins = session.query(Bookbin).order_by(asc(Bookbin.name)) <NEW_LINE> return jsonify(bookbins=[b.serialize for b in bookbins])
Return bookbins as JSON object
625941b65f7d997b871748a8
def searchMatrix(self, matrix, target): <NEW_LINE> <INDENT> m = len(matrix) <NEW_LINE> if m == 0: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> n = len(matrix[0]) <NEW_LINE> if n == 0: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> L = m * n <NEW_LINE> return self.binarySearch(matrix, 0, L-1, target)
:type matrix: List[List[int]] :type target: int :rtype: bool
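Usage on the classic sorted-matrix example; the binarySearch helper is assumed to do a standard search over the flattened index, mapping mid to matrix[mid // n][mid % n], and the enclosing class name Solution is hypothetical:

matrix = [[1, 3, 5, 7],
          [10, 11, 16, 20],
          [23, 30, 34, 60]]
# Solution().searchMatrix(matrix, 3)  -> True
# Solution().searchMatrix(matrix, 13) -> False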
625941b699cbb53fe67929f4
def __init__(self, name, student_no): <NEW_LINE> <INDENT> self._name = name <NEW_LINE> self._sno = student_no <NEW_LINE> self._enrolments = []
Create a Student with a name and unique student number Constructor: Student(str, int)
625941b6cc40096d61595760
def auth_settings(self, auth_method, query_params, resource_path, body): <NEW_LINE> <INDENT> if auth_method == 'basicAuth': <NEW_LINE> <INDENT> return { 'type': 'basic', 'in': 'header', 'key': 'Authorization', 'value': self.get_basic_auth_token() }
Gets Auth Settings dict for api client. :param resource_path: Resource path for the endpoint. Needed for XPayToken :return: The Auth Settings information dict.
625941b6adb09d7d5db6c5a0
def minimize_window(self): <NEW_LINE> <INDENT> self.parent.showMinimized()
Minimize the main window.
625941b663b5f9789fde6ef2
def print_and_save_words(save_file, words_dict): <NEW_LINE> <INDENT> with open(save_file, 'w') as s: <NEW_LINE> <INDENT> max_width = 25 <NEW_LINE> for i, word in enumerate(sorted(words_dict)): <NEW_LINE> <INDENT> sentence_lines = ','.join([str(line) for line in words_dict[word]]) <NEW_LINE> line = '{word: <{width}} {{{length}:{sentence_list}}}'.format( word=word, width=max_width, length=len(words_dict[word]), sentence_list=sentence_lines) <NEW_LINE> print(line) <NEW_LINE> s.write(line + '\n')
Print and save each word found in the text file in this format: {Word}{Spacing}{Number of Occurrences}:{Sentences of Occurrence} Spacing pads each word to a maximum width of 25 characters before the occurrence values. :return: Nothing
625941b663f4b57ef0000f2f
def __load_blast(transcript, data_dict=None, reverse=False): <NEW_LINE> <INDENT> max_target_seqs = transcript.configuration.pick.chimera_split.blast_params.max_target_seqs <NEW_LINE> maximum_evalue = transcript.configuration.pick.chimera_split.blast_params.evalue <NEW_LINE> if data_dict is None: <NEW_LINE> <INDENT> blast_hits_query = [_.as_dict() for _ in transcript.blast_baked(transcript.session).params( query=transcript.id, evalue=maximum_evalue)] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> blast_hits_query = data_dict.get("hits", dict()).get(transcript.id, []) <NEW_LINE> <DEDENT> transcript.logger.debug("Starting to load BLAST data for %s", transcript.id) <NEW_LINE> previous_evalue = -1 <NEW_LINE> counter = 0 <NEW_LINE> for hit in blast_hits_query: <NEW_LINE> <INDENT> if counter > max_target_seqs and previous_evalue < hit["evalue"]: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> elif previous_evalue < hit["evalue"]: <NEW_LINE> <INDENT> previous_evalue = hit["evalue"] <NEW_LINE> <DEDENT> query_frames = [_["query_frame"] for _ in hit["hsps"]] <NEW_LINE> transcript.logger.debug("Query frames for %s: %s", transcript.id, query_frames) <NEW_LINE> if reverse is True: <NEW_LINE> <INDENT> query_frames = [_ * -1 for _ in query_frames] <NEW_LINE> transcript.logger.debug("Query frames for %s after reversal: %s", transcript.id, query_frames) <NEW_LINE> <DEDENT> if any(_ < 0 for _ in query_frames): <NEW_LINE> <INDENT> transcript.logger.debug("Hit %s skipped for %s as it is on opposite strand", hit["target"], transcript.id) <NEW_LINE> continue <NEW_LINE> <DEDENT> counter += 1 <NEW_LINE> transcript.blast_hits.append(hit) <NEW_LINE> <DEDENT> transcript.logger.debug("Loaded %d BLAST hits for %s", counter, transcript.id)
This method looks into the DB for hits corresponding to the desired requirements. Hits will be loaded into the "blast_hits" list; we will not store the SQLAlchemy query object, but rather its representation as a dictionary (using the Hit.as_dict() method). :param transcript: the Transcript instance :type transcript: Mikado.loci_objects.transcript.Transcript
625941b61f037a2d8b94600c
def resource_refresh( self, name="runner.pcmk.refresh", instead=None, before=None, resource=None, node=None, strict=False, stdout="", stderr="", returncode=0, ): <NEW_LINE> <INDENT> cmd = ["crm_resource", "--refresh"] <NEW_LINE> if resource: <NEW_LINE> <INDENT> cmd.extend(["--resource", resource]) <NEW_LINE> <DEDENT> if node: <NEW_LINE> <INDENT> cmd.extend(["--node", node]) <NEW_LINE> <DEDENT> if strict: <NEW_LINE> <INDENT> cmd.extend(["--force"]) <NEW_LINE> <DEDENT> self.__calls.place( name, RunnerCall( cmd, stdout=stdout, stderr=stderr, returncode=returncode, ), before=before, instead=instead, )
Create a call for crm_resource --refresh string name -- the key of this call string instead -- the key of a call instead of which this new call is to be placed string before -- the key of a call before which this new call is to be placed string resource -- the id of a resource to be cleaned string node -- the name of the node where resources should be cleaned bool strict -- strict mode of 'crm_resource refresh' enabled? string stdout -- crm_resource's stdout string stderr -- crm_resource's stderr int returncode -- crm_resource's returncode
625941b6099cdd3c635f0a69
def fix_dimensions_vector_2darray(vector): <NEW_LINE> <INDENT> if not isinstance(vector, collections.Iterable): <NEW_LINE> <INDENT> vector = np.array([vector]) <NEW_LINE> <DEDENT> elif not isinstance(vector, np.ndarray): <NEW_LINE> <INDENT> vector = np.array(vector) <NEW_LINE> <DEDENT> if len(vector.shape) <= 1: <NEW_LINE> <INDENT> vector = np.expand_dims(vector, axis=1) <NEW_LINE> <DEDENT> return vector
Fix the dimensions of an input so that it is a :class:`numpy.ndarray` of shape (N,1). :param vector: numerical object :rtype: :class:`numpy.ndarray` :returns: array of shape (N,1)
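The three accepted input kinds and the shapes they come out as:

import numpy as np

fix_dimensions_vector_2darray(3.0).shape              # (1, 1): scalar wrapped, axis added
fix_dimensions_vector_2darray([1, 2, 3]).shape        # (3, 1): list converted, axis added
fix_dimensions_vector_2darray(np.ones((4, 2))).shape  # (4, 2): already 2-D, unchanged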
625941b6ab23a570cc24ff8d
def get_value_from_tag(self, tag, attribute): <NEW_LINE> <INDENT> value = tag.get(self._ns(attribute)) <NEW_LINE> if value is None: <NEW_LINE> <INDENT> value = tag.get(attribute) <NEW_LINE> if value: <NEW_LINE> <INDENT> log.warning("Failed to get the attribute '{}' on tag '{}' with namespace. " "But found the same attribute without namespace!".format(attribute, tag.tag)) <NEW_LINE> <DEDENT> <DEDENT> return value
Return the value of the android prefixed attribute in a specific tag. This function will always try to get the attribute with a android: prefix first, and will try to return the attribute without the prefix, if the attribute could not be found. This is useful for some broken AndroidManifest.xml, where no android namespace is set, but could also indicate malicious activity (i.e. wrongly repackaged files). A warning is printed if the attribute is found without a namespace prefix. If you require to get the exact result you need to query the tag directly: example:: >>> from lxml.etree import Element >>> tag = Element('bar', nsmap={'android': 'http://schemas.android.com/apk/res/android'}) >>> tag.set('{http://schemas.android.com/apk/res/android}foobar', 'barfoo') >>> tag.set('name', 'baz') # Assume that `a` is some APK object >>> a.get_value_from_tag(tag, 'name') 'baz' >>> tag.get('name') 'baz' >>> tag.get('foobar') None >>> a.get_value_from_tag(tag, 'foobar') 'barfoo' :param lxml.etree.Element tag: specify the tag element :param str attribute: specify the attribute name :returns: the attribute's value, or None if the attribute is not present
625941b6462c4b4f79d1d4dd
def __getitem__(self, k): <NEW_LINE> <INDENT> if not 0 <= k < self.n: <NEW_LINE> <INDENT> raise IndexError('k is out of bounds!') <NEW_LINE> <DEDENT> return self.A[k]
Get an element of the array at index k Usage: _array[k]
625941b67047854f462a121a
def new_tasks(self, extra): <NEW_LINE> <INDENT> tasks = [] <NEW_LINE> for input_file_name in os.listdir(self.params.structure_data): <NEW_LINE> <INDENT> if not input_file_name.endswith(".mat"): <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> input_file = os.path.abspath(os.path.join(self.params.structure_data, input_file_name)) <NEW_LINE> for model in self.params.models.split(','): <NEW_LINE> <INDENT> (a,b,c,data_index) = input_file_name.split('_') <NEW_LINE> jobname = "gminarevix-%s-%s" % (model,(input_file_name)) <NEW_LINE> extra_args = extra.copy() <NEW_LINE> extra_args['jobname'] = jobname <NEW_LINE> if self.params.run_binary: <NEW_LINE> <INDENT> extra_args['run_binary'] = self.params.run_binary <NEW_LINE> <DEDENT> extra_args['output_dir'] = self.params.output <NEW_LINE> extra_args['output_dir'] = extra_args['output_dir'].replace('NAME', os.path.join(model, input_file_name)) <NEW_LINE> extra_args['output_dir'] = extra_args['output_dir'].replace('SESSION', os.path.join(model, input_file_name)) <NEW_LINE> extra_args['output_dir'] = extra_args['output_dir'].replace('DATE', os.path.join(model, input_file_name)) <NEW_LINE> extra_args['output_dir'] = extra_args['output_dir'].replace('TIME', os.path.join(model, input_file_name)) <NEW_LINE> tasks.append(GminarevixApplication( model, input_file, **extra_args)) <NEW_LINE> <DEDENT> <DEDENT> return tasks
For each of the input data files and each of the selected models, create a GminarevixApplication. First loop over the input files, then over the selected models.
625941b66aa9bd52df036baf
def stop(bot, update, job_queue, chat_data): <NEW_LINE> <INDENT> chat_id = update.message.chat_id <NEW_LINE> if chat_id in config['data']: <NEW_LINE> <INDENT> del config['data'][chat_id] <NEW_LINE> with open(configFilename, 'w') as f: <NEW_LINE> <INDENT> json.dump(config, f) <NEW_LINE> <DEDENT> <DEDENT> if 'job' not in chat_data: <NEW_LINE> <INDENT> update.message.reply_text('Nothing was running! Start using /start.') <NEW_LINE> return <NEW_LINE> <DEDENT> job = chat_data['job'] <NEW_LINE> job.schedule_removal() <NEW_LINE> del chat_data['job'] <NEW_LINE> update.message.reply_text('Stopped successful! Start again using /start.')
Stops the livestream polling.
625941b6aad79263cf390847
def getDihedralEnergy(self, time_series=False, block="AUTO"): <NEW_LINE> <INDENT> return self.getRecord("DIHED", time_series, _Units.Energy.kcal_per_mol, block)
Get the dihedral energy. Parameters ---------- time_series : bool Whether to return a list of time series records. block : bool Whether to block until the process has finished running. Returns ------- energy : :class:`Energy <BioSimSpace.Types.Energy>` The dihedral energy.
625941b623e79379d52ee375
def recent(self, page=0): <NEW_LINE> <INDENT> return Recent(self.base_url, self.use_tor, page)
List the most recent Torrents added to TPB.
625941b666656f66f7cbbfb7
def __str__(self): <NEW_LINE> <INDENT> return 'Kerr07 Cell %d Type %d (%s)' % (self.id, self.type, self.types[self.type])
Produce a string to be used when the object is printed
625941b6c4546d3d9de7283d
def apply(self, *args): <NEW_LINE> <INDENT> return _Math.ButterworthBandpassD_apply(self, *args)
apply(ButterworthBandpassD self, int n, double * inout)
625941b615fb5d323cde0916
def get_player_position(self): <NEW_LINE> <INDENT> x = 0 <NEW_LINE> y = 0 <NEW_LINE> for line in self.maze: <NEW_LINE> <INDENT> for c in line: <NEW_LINE> <INDENT> if c == "S": <NEW_LINE> <INDENT> return x, y <NEW_LINE> <DEDENT> x = x + 1 <NEW_LINE> <DEDENT> y = y+1 <NEW_LINE> x = 0
Return the starting position ('S') of the player in the maze.
625941b6b5575c28eb68de0a
def __init__(self, frame_number: int, modifier_list: list, object_operator_spec: OperatorSpecObjectMode = None): <NEW_LINE> <INDENT> self.frame_number = frame_number <NEW_LINE> self.modifier_list = modifier_list <NEW_LINE> self.object_operator_spec = object_operator_spec
Constructs a Deform Modifier spec (for user input) :param frame_number: int - the frame at which animated keyframe is inserted :param modifier_list: ModifierSpec - contains modifiers :param object_operator_spec: OperatorSpecObjectMode - contains object operators
625941b6de87d2750b85fb9b
def set_clear(self): <NEW_LINE> <INDENT> self.clear() <NEW_LINE> self.write_display()
Clear and refresh the display.
625941b65166f23b2e1a4f66
def handle_add_button(self): <NEW_LINE> <INDENT> if self.add_command: <NEW_LINE> <INDENT> self.add_command({ 'classid': self.id_entry.value, 'url': self.url_entry.value })
Handle the Add button: pass the class id and URL from the entry fields to add_command, if one is set.
625941b685dfad0860c3ac66
def wait_for_event_timeout(e, t): <NEW_LINE> <INDENT> while not e.is_set(): <NEW_LINE> <INDENT> logging.debug('wait_for_event_timeout starting') <NEW_LINE> event_is_set = e.wait(t) <NEW_LINE> logging.debug('event set: %s', event_is_set) <NEW_LINE> if event_is_set: <NEW_LINE> <INDENT> logging.debug('processing event') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> logging.debug('doing other work')
Wait t seconds and then timeout
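A minimal driver for the function above (standard threading usage, not from the source):

import logging
import threading

logging.basicConfig(level=logging.DEBUG)
e = threading.Event()
t = threading.Thread(target=wait_for_event_timeout, args=(e, 2))
t.start()
e.set()    # releases the waiter; otherwise it re-waits every 2 seconds
t.join()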
625941b6498bea3a759b98bf
def get_type(self): <NEW_LINE> <INDENT> return self.type_idx_value
Return the type of the field :rtype: string
625941b660cbc95b062c6356
def hex_ring(h3_address, ring_size): <NEW_LINE> <INDENT> array_len = 6 * ring_size <NEW_LINE> HexRingArray = c_long * array_len <NEW_LINE> hex_rings = HexRingArray() <NEW_LINE> success = libh3.hexRing(string_to_h3(h3_address), ring_size, hex_rings) <NEW_LINE> if success != 0: <NEW_LINE> <INDENT> raise Exception( 'Failed to get hexagon ring for pentagon {}'.format(h3_address)) <NEW_LINE> <DEDENT> return hexagon_c_array_to_set(hex_rings)
Get a hexagon ring for a given hexagon. Returns individual rings, unlike `k_ring`. If a pentagon is reachable, falls back to a MUCH slower form based on `k_ring`.
625941b6d58c6744b4257a6e
def get_xyz_from_radar(radar): <NEW_LINE> <INDENT> azimuth_1D = radar.azimuth['data'] <NEW_LINE> elevation_1D = radar.elevation['data'] <NEW_LINE> srange_1D = radar.range['data'] <NEW_LINE> sr_2d, az_2d = np.meshgrid(srange_1D, azimuth_1D) <NEW_LINE> el_2d = np.meshgrid(srange_1D, elevation_1D)[1] <NEW_LINE> xx, yy, zz = radar_coords_to_cart(sr_2d/RNG_MULT, az_2d, el_2d) <NEW_LINE> return xx, yy, zz + np.median(radar.altitude['data'])
Input radar object, return z from radar (km, 2D)
625941b691f36d47f21ac303
def get_data(self, endpoint: str) -> str: <NEW_LINE> <INDENT> self.log.info(f'requesting api data for {endpoint}') <NEW_LINE> response = requests.get(f'{self.api}/{endpoint}', headers=api.AUTHORIZATION_HEADER, stream=True) <NEW_LINE> try: <NEW_LINE> <INDENT> response.raise_for_status() <NEW_LINE> <DEDENT> except Exception as error: <NEW_LINE> <INDENT> self.success = (False, f'DOC API {endpoint} endpoint failure') <NEW_LINE> self.log.fatal(error) <NEW_LINE> return 'Fail' <NEW_LINE> <DEDENT> self.log.debug('streaming data') <NEW_LINE> content_hash = xxh64() <NEW_LINE> with (self.corrections / f'{endpoint}.json').open(mode='wb') as cursor: <NEW_LINE> <INDENT> for chunk in response.iter_content(chunk_size=128): <NEW_LINE> <INDENT> cursor.write(chunk) <NEW_LINE> content_hash.update(chunk) <NEW_LINE> <DEDENT> <DEDENT> return content_hash.hexdigest()
Makes a request to the endpoint, writes the data to a JSON file named after the endpoint, and returns the hash of the content (or the 'Fail' constant on failure).
625941b607f4c71912b11294
def build_char_table(self,filename): <NEW_LINE> <INDENT> freq_table={} <NEW_LINE> f=open(filename,'r') <NEW_LINE> s=f.read() <NEW_LINE> for x in s: <NEW_LINE> <INDENT> i=s.count(x) <NEW_LINE> freq_table[x]=i <NEW_LINE> <DEDENT> return freq_table
Build and return a hash that maps every character in the file 'filename' to the number of occurrences of that char in the file
625941b68e7ae83300e4add9
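The loop above is quadratic, since s.count(x) rescans the whole string for every character; an equivalent single-pass version using collections.Counter:

    from collections import Counter

    def build_char_table(filename):
        # One pass over the file contents; Counter tallies each character.
        with open(filename, 'r') as f:
            return dict(Counter(f.read()))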
def migration_plan(self, targets): <NEW_LINE> <INDENT> plan = [] <NEW_LINE> applied = set(self.loader.applied_migrations) <NEW_LINE> for target in targets: <NEW_LINE> <INDENT> if target[1] is None: <NEW_LINE> <INDENT> for root in self.loader.graph.root_nodes(): <NEW_LINE> <INDENT> if root[0] == target[0]: <NEW_LINE> <INDENT> for migration in self.loader.graph.backwards_plan(root): <NEW_LINE> <INDENT> if migration in applied: <NEW_LINE> <INDENT> plan.append((self.loader.graph.nodes[migration], True)) <NEW_LINE> applied.remove(migration) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> <DEDENT> elif target in applied: <NEW_LINE> <INDENT> backwards_plan = self.loader.graph.backwards_plan(target)[:-1] <NEW_LINE> if any(node[0] == target[0] for node in backwards_plan): <NEW_LINE> <INDENT> for migration in backwards_plan: <NEW_LINE> <INDENT> if migration in applied: <NEW_LINE> <INDENT> plan.append((self.loader.graph.nodes[migration], True)) <NEW_LINE> applied.remove(migration) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> for migration in self.loader.graph.forwards_plan(target): <NEW_LINE> <INDENT> if migration not in applied: <NEW_LINE> <INDENT> plan.append((self.loader.graph.nodes[migration], False)) <NEW_LINE> applied.add(migration) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> return plan
Given a set of targets, returns a list of (Migration instance, backwards?).
625941b623849d37ff7b2e9f
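In Django this method lives on the migration executor; a hedged usage sketch (app label and migration name are made up):

    from django.db import connections
    from django.db.migrations.executor import MigrationExecutor

    executor = MigrationExecutor(connections['default'])
    # Forwards plan up to a named migration; ('myapp', None) would unapply the app.
    plan = executor.migration_plan([('myapp', '0002_auto')])
    for migration, backwards in plan:
        print(migration, 'backwards' if backwards else 'forwards')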
def url_concat(url, d): <NEW_LINE> <INDENT> SPACE = 'SUCHSPAAAAAACE' <NEW_LINE> new_d = {k: v.replace(' ', SPACE) for k, v in d.items()} <NEW_LINE> return _url_concat(url, new_d).replace(SPACE, '%20')
Concatenate url and query dict d. Space handling for the MPC webserver is nonstandard: '+' as a space doesn't work, '%20' does. Replacing spaces before url_concat double-encodes them, so they are first swapped for a sentinel string and replaced with '%20' afterwards. Ref: http://stackoverflow.com/questions/2678551
625941b60c0af96317bb7ff7
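A quick illustration of why the sentinel is needed; _url_concat is assumed here to be a Tornado-style helper that URL-encodes the query dict:

    from urllib.parse import urlencode

    def _url_concat(url, args):
        # assumed behavior of the wrapped helper
        return url + ('&' if '?' in url else '?') + urlencode(args)

    print(_url_concat('http://mpc/search', {'name': 'a b'}))
    # http://mpc/search?name=a+b    <- '+' form the MPC server rejects
    print(url_concat('http://mpc/search', {'name': 'a b'}))
    # http://mpc/search?name=a%20b  <- sentinel swapped back to %20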
def testCxInputImpulseResponseA(self): <NEW_LINE> <INDENT> b, a = self.designIIR() <NEW_LINE> a = muxZeros(a) <NEW_LINE> self.setProps(aCmplx=True, bCmplx=False) <NEW_LINE> self.impluseResponseTest(a, b)
test 1 with real data but complex a & real b
625941b6ec188e330fd5a5b5
def append(self, picker_list, **kwargs): <NEW_LINE> <INDENT> old_length = len(self) <NEW_LINE> new_length = old_length + sum(len(p) for p in picker_list) <NEW_LINE> new_data = np.resize(self._data, (new_length,)) <NEW_LINE> row_idx = old_length <NEW_LINE> for picker in picker_list: <NEW_LINE> <INDENT> new_data[row_idx:row_idx+len(picker._data)] = picker._data <NEW_LINE> row_idx += len(picker._data) <NEW_LINE> <DEDENT> if len(kwargs) > 0: <NEW_LINE> <INDENT> if len(kwargs) > 1: print("warning: too many arguments") <NEW_LINE> fieldname = list(kwargs.keys())[0] <NEW_LINE> labels = kwargs[fieldname] <NEW_LINE> newcolumn = np.empty(shape=(new_length,), dtype=int) <NEW_LINE> newcolumn[:old_length] = labels[0] <NEW_LINE> row_idx = old_length <NEW_LINE> for label, picker in zip(labels[1:], picker_list): <NEW_LINE> <INDENT> newcolumn[row_idx:row_idx+len(picker._data)] = label <NEW_LINE> row_idx += len(picker._data) <NEW_LINE> <DEDENT> new_data = mlab.rec_append_fields(new_data, fieldname, newcolumn) <NEW_LINE> <DEDENT> self._data = new_data
Resize my data and add in the data from Pickers in picker_list note: equality test fails on picker2 for some reason Will also add a new column if you specify. Usage: p1.append([p2, p3], ratname=(1,2,3)) Now p1 has all of the data from p1, p2, and p3. p1['ratname'] is 1, 2, or 3, depending on the source.
625941b67c178a314d6ef265
def test_upgrade_downgrade_fail_after_upgrade(fx_cfg_yml_file_use_db_url, fx_only_support_pgsql): <NEW_LINE> <INDENT> database_engine = app.config['DATABASE_ENGINE'] <NEW_LINE> Base.metadata.drop_all(bind=database_engine) <NEW_LINE> database_engine.execute( "drop table if exists alembic_version;" ) <NEW_LINE> p = subprocess.Popen( [ 'cliche', 'upgrade', '-c', str(fx_cfg_yml_file_use_db_url), '27e81ea4d86' ], stdout=subprocess.PIPE, stderr=subprocess.PIPE ) <NEW_LINE> p.communicate() <NEW_LINE> p = subprocess.Popen( [ 'cliche', 'upgrade', '-c', str(fx_cfg_yml_file_use_db_url) ], stdout=subprocess.PIPE, stderr=subprocess.PIPE ) <NEW_LINE> p.communicate() <NEW_LINE> p = subprocess.Popen( [ 'cliche', 'upgrade', '-c', str(fx_cfg_yml_file_use_db_url), 'zzzzzzzzzzz' ], stdout=subprocess.PIPE, stderr=subprocess.PIPE ) <NEW_LINE> out, err = p.communicate() <NEW_LINE> exit_code = p.returncode <NEW_LINE> assert "No such revision or branch 'zzzzzzzzzzz'" in err.decode('u8') <NEW_LINE> assert exit_code == 1
Downgrade to an unknown revision fails after an upgrade.
625941b6090684286d50eaed
def eval_features(self, learn_config, features_to_eval, loss_function=None, eval_type=EvalType.SeqAdd, eval_metrics=None, thread_count=-1, eval_step=None, label_mode=LabelMode.AddFeature): <NEW_LINE> <INDENT> features_to_eval = set(features_to_eval) <NEW_LINE> if eval_metrics is None: <NEW_LINE> <INDENT> eval_metrics = [] <NEW_LINE> <DEDENT> eval_metrics = eval_metrics if isinstance(eval_metrics, list) else [eval_metrics] <NEW_LINE> if isinstance(learn_config, CatBoost): <NEW_LINE> <INDENT> params = learn_config.get_params() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> params = dict(learn_config) <NEW_LINE> <DEDENT> if loss_function is not None: <NEW_LINE> <INDENT> if "loss_function" in params and params["loss_function"] != loss_function: <NEW_LINE> <INDENT> raise CatBoostError("Loss function in params {} should be equal to feature evaluation objective " "function {}".format(params["loss_function"], loss_function)) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> if "loss_function" not in params: <NEW_LINE> <INDENT> raise CatBoostError("Provide loss function in params or as option to eval_features method") <NEW_LINE> <DEDENT> <DEDENT> if thread_count is not None and thread_count != -1: <NEW_LINE> <INDENT> params["thread_count"] = thread_count <NEW_LINE> <DEDENT> if eval_step is None: <NEW_LINE> <INDENT> eval_step = 1 <NEW_LINE> <DEDENT> if loss_function is not None: <NEW_LINE> <INDENT> params["loss_function"] = loss_function <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> loss_function = params["loss_function"] <NEW_LINE> <DEDENT> if params["loss_function"] == "PairLogit": <NEW_LINE> <INDENT> raise CatBoostError("Pair classification is not supported") <NEW_LINE> <DEDENT> baseline_case, test_cases = self._create_eval_feature_cases(params, features_to_eval, eval_type=eval_type, label_mode=label_mode) <NEW_LINE> if loss_function not in eval_metrics: <NEW_LINE> <INDENT> eval_metrics.append(loss_function) <NEW_LINE> <DEDENT> return self.eval_cases(baseline_case=baseline_case, compare_cases=test_cases, eval_metrics=eval_metrics, thread_count=thread_count, eval_step=eval_step)
Evaluate features. Args: learn_config: dict with params or an instance of CatBoost; in the second case the instance params will be used. features_to_eval: list of indices of features to evaluate. loss_function: one of the CatBoost loss functions; taken from learn_config if not specified. eval_type: type of feature evaluation (All, SeqAdd, SeqRem). eval_metrics: additional metrics to calculate. thread_count: thread count to use; if not None, overrides learn_config values. Returns ------- result : Instance of EvaluationResult class
625941b6e64d504609d7464e
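A hedged sketch of calling eval_features; the evaluator object, parameter values, and feature indices are illustrative:

    learn_params = {
        'loss_function': 'Logloss',
        'iterations': 500,
    }
    result = evaluator.eval_features(
        learn_config=learn_params,
        features_to_eval=[3, 7, 11],    # column indices under test
        eval_type=EvalType.SeqAdd,      # add the features one at a time
        eval_metrics=['AUC'],
    )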
def cal_extraterrestrial_radiation_horizontal_five_min_daily(self, doy): <NEW_LINE> <INDENT> I_s = [] <NEW_LINE> for i in range(6, 19): <NEW_LINE> <INDENT> for j in seq(0,1,1/12): <NEW_LINE> <INDENT> w1 = i + j <NEW_LINE> w2 = i + j + 1/12 <NEW_LINE> value = self.cal_extraterrestrial_radiation_horizontal_between_timestamp(w1, w2, doy) <NEW_LINE> I_s.append(round(value / 10000,2)) <NEW_LINE> <DEDENT> <DEDENT> return I_s
Five-minute extraterrestrial radiation on a horizontal surface for each interval between 06:00 and 19:00 local time on day-of-year doy.
625941b60383005118ecf3f2
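seq is not a builtin; presumably a float-friendly range, roughly like this (whether the endpoint is included is an assumption — the caller above needs 0 .. 11/12):

    def seq(start, stop, step):
        # Index arithmetic avoids drift from repeatedly adding a float step.
        n = int(round((stop - start) / step))
        return [start + i * step for i in range(n)]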
def resourceURI(self): <NEW_LINE> <INDENT> return _DataModel.WaveformStreamID_resourceURI(self)
resourceURI(WaveformStreamID self) -> std::string const &
625941b6cc0a2c11143dcca6
def sendToRsu(self, rsu, message, currentTime, network): <NEW_LINE> <INDENT> message.indexRsu.append(rsu.id) <NEW_LINE> self.simulateTranferTime( preReceive=rsu.preReceiveFromCar, meanTranfer=Config.carRsuMeanTranfer, message=message, ) <NEW_LINE> message.locations.append(1) <NEW_LINE> rsu.preReceiveFromCar = message.currentTime <NEW_LINE> if message.currentTime > currentTime + Config.cycleTime: <NEW_LINE> <INDENT> rsu.waitList.append(message) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> network.addToHeap(message)
Simulate sending a message from a car to an RSU Args: rsu (RsuSimulator): the receiving roadside unit message (Message): the message being transferred currentTime (float): the current simulation time network (Network): the network that schedules delivered messages
625941b6a8370b77170526af
def purge_db(self): <NEW_LINE> <INDENT> state = self.get_state() <NEW_LINE> self.reset_state(DatabaseState(), state)
Function: purge_db ------------------ Remove everything from the database.
625941b6d6c5a10208143e54
def init_plot(self): <NEW_LINE> <INDENT> self.figure.clf() <NEW_LINE> self.axes = self.figure.add_axes([0, 0, 1, 1]) <NEW_LINE> self.axes.set_xlim(self.Xlim) <NEW_LINE> self.axes.set_ylim(self.Ylim) <NEW_LINE> self.axes.axis('off') <NEW_LINE> positions = nx.get_node_attributes(self.network, 'position') <NEW_LINE> color = ['black'] <NEW_LINE> self.bgBox = self.draw_edges(self.network, pos=positions, ax=self.axes, arrow=True, edge_color=color, width=self.edgeWidthSize) <NEW_LINE> self.draw_idle()
Update canvas to plot new queue visualization
625941b6a8ecb033257d2ee4
def test_create_reorders_step(self): <NEW_LINE> <INDENT> recipe = Recipe.objects.first() <NEW_LINE> self.assertEqual(recipe.steps.count(), 1) <NEW_LINE> first_step = recipe.steps.first() <NEW_LINE> middle_step = Step.objects.create( recipe=recipe, duration=10, step_number=2, step_title='Step 2' ) <NEW_LINE> end_step = Step.objects.create( recipe=recipe, duration=10, step_number=3, step_title='Step 3' ) <NEW_LINE> url = steps_endpoint.format(recipe.pk) <NEW_LINE> response = self.client.post(url, { 'duration': 12, 'step_number': 2, 'step_title': 'New Step 2', }) <NEW_LINE> self.assertEqual(response.status_code, status.HTTP_201_CREATED) <NEW_LINE> first_step.refresh_from_db() <NEW_LINE> middle_step.refresh_from_db() <NEW_LINE> end_step.refresh_from_db() <NEW_LINE> self.assertEqual(first_step.step_number, 1) <NEW_LINE> self.assertEqual(middle_step.step_number, 3) <NEW_LINE> self.assertEqual(end_step.step_number, 4)
Ensure new Step with duplicate step_number reorders the others.
625941b6187af65679ca4f2d
def _construct_dsdl_definitions_from_namespace( root_namespace_path: str, ) -> typing.List[_dsdl_definition.DSDLDefinition]: <NEW_LINE> <INDENT> def on_walk_error(os_ex: Exception) -> None: <NEW_LINE> <INDENT> raise os_ex <NEW_LINE> <DEDENT> walker = os.walk(root_namespace_path, onerror=on_walk_error, followlinks=True) <NEW_LINE> source_file_paths = [] <NEW_LINE> for root, _dirnames, filenames in walker: <NEW_LINE> <INDENT> for filename in fnmatch.filter(filenames, _DSDL_FILE_GLOB): <NEW_LINE> <INDENT> source_file_paths.append(os.path.join(root, filename)) <NEW_LINE> <DEDENT> <DEDENT> _logger.debug("DSDL files in the namespace dir %r are listed below:", root_namespace_path) <NEW_LINE> for a in source_file_paths: <NEW_LINE> <INDENT> _logger.debug(_LOG_LIST_ITEM_PREFIX + a) <NEW_LINE> <DEDENT> output = [] <NEW_LINE> for fp in source_file_paths: <NEW_LINE> <INDENT> dsdl_def = _dsdl_definition.DSDLDefinition(fp, root_namespace_path) <NEW_LINE> output.append(dsdl_def) <NEW_LINE> <DEDENT> return list(sorted(output, key=lambda d: (d.full_name, -d.version.major, -d.version.minor)))
Accepts a directory path, returns a sorted list of abstract DSDL file representations. Those can be read later. The definitions are sorted by name lexicographically, then by major version (greatest version first), then by minor version (same ordering as the major version).
625941b6627d3e7fe0d68c5c
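The sort key orders definitions by full name ascending, then by newest version first; a tiny demonstration with stand-in tuples in place of DSDLDefinition objects:

    # (full_name, (major, minor)) stand-ins
    defs = [('uavcan.node.Heartbeat', (1, 0)),
            ('uavcan.node.Heartbeat', (1, 2)),
            ('uavcan.node.GetInfo',   (1, 1))]

    ordered = sorted(defs, key=lambda d: (d[0], -d[1][0], -d[1][1]))
    # GetInfo 1.1, then Heartbeat 1.2, then Heartbeat 1.0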
def test_ToHSV_1(self): <NEW_LINE> <INDENT> color=(0.0,1.0,0.5) <NEW_LINE> self.rvalue = ToHSV(color) <NEW_LINE> self.assertEqual(self.rvalue,(0.41666666666666669, 1.0, 1.0))
tests ToHSV
625941b62c8b7c6e89b355d2
def init(self): <NEW_LINE> <INDENT> filename = os.path.realpath(self.filename) <NEW_LINE> env = db.DBEnv() <NEW_LINE> env.set_lk_detect(db.DB_LOCK_DEFAULT) <NEW_LINE> env.open( os.path.dirname(filename), db.DB_PRIVATE | db.DB_THREAD | db.DB_INIT_LOCK | db.DB_INIT_MPOOL | db.DB_CREATE, ) <NEW_LINE> d = db.DB(env) <NEW_LINE> d.open(filename, 'main', db.DB_BTREE, db.DB_THREAD | db.DB_RDONLY) <NEW_LINE> wallet_data = collections.OrderedDict((k, d[k]) for k in d.keys()) <NEW_LINE> data = {} <NEW_LINE> purpose = collections.defaultdict(list) <NEW_LINE> for key, value in wallet_data.items(): <NEW_LINE> <INDENT> kds = BCDataStream(key) <NEW_LINE> vds = BCDataStream(value) <NEW_LINE> _type = kds.read_string().decode() <NEW_LINE> if _type == 'name': <NEW_LINE> <INDENT> label = vds.read_string().decode() <NEW_LINE> address = kds.read_string().decode() <NEW_LINE> data[address] = label <NEW_LINE> <DEDENT> elif _type == "purpose": <NEW_LINE> <INDENT> category = vds.read_string().decode() <NEW_LINE> address = kds.read_string().decode() <NEW_LINE> purpose[category].append(address) <NEW_LINE> <DEDENT> <DEDENT> for address, label in data.items(): <NEW_LINE> <INDENT> for category, addresses in purpose.items(): <NEW_LINE> <INDENT> if address in addresses: <NEW_LINE> <INDENT> self.addresses[category][label].append(address)
Initialize: load address labels and purposes from the wallet database.
625941b6a79ad161976cbf53