Dataset schema: code (string, 4 to 4.48k chars), docstring (string, 1 to 6.45k chars), _id (string, 24 chars).
def load_manifest(self, manifest_name: str):
    if manifest_name not in self.manifest_file_names:
        raise ValueError(
            f"Manifest to load ({manifest_name}) is not one of the "
            "valid manifest names for this dataset. Valid names include:\n"
            f"{self.manifest_file_names}")
    if manifest_name != self.latest_manifest_file:
        self._warn_of_outdated_manifest(manifest_name)
    manifest_path = os.path.join(self._cache_dir, manifest_name)
    if not os.path.exists(manifest_path):
        self._download_manifest(manifest_name)
    self._manifest = self._load_manifest(manifest_name)
    with open(self._manifest_last_used, 'w') as out_file:
        out_file.write(manifest_name)
    self._manifest_name = manifest_name
Load a manifest from this dataset.

Parameters
----------
manifest_name: str
    The name of the manifest to load. Must be an element in
    self.manifest_file_names
625941b78da39b475bd64dbc
def response_get(self, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        return self.response_get_with_http_info(**kwargs)
    else:
        (data) = self.response_get_with_http_info(**kwargs)
        return data
Find all Responses

<p><strong>Permissions:</strong> ✓ Respondent ✗ Customer ✓ Manager</p>

This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function to be
invoked when receiving the response.

>>> def callback_function(response):
>>>     pprint(response)
>>>
>>> thread = api.response_get(callback=callback_function)

:param callback function: The callback function for asynchronous request. (optional)
:param int skip: The number of results to skip.
:param int limit: The maximum number of results to return.
:param str where: JSON formatted string.
:param str sort: Attribute used to sort results.
:return: list[Response]
    If the method is called asynchronously, returns the request thread.
625941b7cb5e8a47e48b78f5
def test_variable_name_change(self):
    self.open_dataset("iris")
    idx = self.widget.domain_editor.model().createIndex(4, 0)
    self.widget.domain_editor.model().setData(idx, "a", Qt.EditRole)
    self.widget.apply_button.click()
    data = self.get_output(self.widget.Outputs.data)
    self.assertIn("a", data.domain)
    idx = self.widget.domain_editor.model().createIndex(3, 0)
    self.widget.domain_editor.model().setData(idx, "d", Qt.EditRole)
    self.widget.apply_button.click()
    data = self.get_output(self.widget.Outputs.data)
    self.assertIn("d", data.domain)
    idx = self.widget.domain_editor.model().createIndex(4, 0)
    self.widget.domain_editor.model().setData(idx, "b", Qt.EditRole)
    idx = self.widget.domain_editor.model().createIndex(4, 1)
    self.widget.domain_editor.model().setData(idx, "text", Qt.EditRole)
    self.widget.apply_button.click()
    data = self.get_output(self.widget.Outputs.data)
    self.assertIn("b", data.domain)
    self.assertIsInstance(data.domain["b"], StringVariable)
    idx = self.widget.domain_editor.model().createIndex(4, 0)
    self.widget.domain_editor.model().setData(idx, "c", Qt.EditRole)
    idx = self.widget.domain_editor.model().createIndex(4, 1)
    self.widget.domain_editor.model().setData(idx, "categorical", Qt.EditRole)
    self.widget.apply_button.click()
    data = self.get_output(self.widget.Outputs.data)
    self.assertIn("c", data.domain)
    self.assertIsInstance(data.domain["c"], DiscreteVariable)
    self.open_dataset("zoo")
    idx = self.widget.domain_editor.model().createIndex(0, 0)
    self.widget.domain_editor.model().setData(idx, "c", Qt.EditRole)
    idx = self.widget.domain_editor.model().createIndex(0, 1)
    self.widget.domain_editor.model().setData(idx, "numeric", Qt.EditRole)
    self.widget.apply_button.click()
    data = self.get_output(self.widget.Outputs.data)
    self.assertIn("c", data.domain)
    self.assertIsInstance(data.domain["c"], ContinuousVariable)

Test whether the name of the variable is changed correctly by the domain editor.
625941b799fddb7c1c9de1d9
def __init__(self, name, dpopts='--no-slicing', **kwargs):
    AP.__init__(self, name, **kwargs)
    pathCheck('ofdatapath', 'ofprotocol',
              moduleName='the OpenFlow reference user switch' +
              '(openflow.org)')
    if self.listenPort:
        self.opts += ' --listen=ptcp:%i ' % self.listenPort
    else:
        self.opts += ' --listen=punix:/tmp/%s.listen' % self.name
    self.dpopts = dpopts
Init.

name: name for the switch
dpopts: additional arguments to ofdatapath (--no-slicing)
625941b79c8ee82313fbb5ba
def maybe_enter(win_id, mode, reason=None, override=False):
    try:
        _get_modeman(win_id).enter(mode, reason, override)
    except ModeLockedError:
        pass
Convenience method to enter 'mode' without exceptions.
625941b796565a6dacc8f51a
def hours_to_string(h, precision=5, pad=False, sep=('h', 'm', 's')):
    if pad:
        if h < 0:
            pad = 3
        else:
            pad = 2
    else:
        pad = 0
    if not isinstance(sep, tuple):
        sep = tuple(sep)
    if len(sep) == 1:
        sep = sep + (sep[0], '')
    elif len(sep) == 2:
        sep = sep + ('',)
    elif len(sep) != 3:
        raise ValueError(
            "Invalid separator specification for converting angle to string.")
    literal = ('{0:0{pad}.0f}{sep[0]}{1:02d}{sep[1]}{2:0{width}.{precision}f}'
               '{sep[2]}')
    h, m, s = hours_to_hms(h)
    return literal.format(h, abs(m), abs(s), sep=sep, pad=pad,
                          width=(precision + 3), precision=precision)
Takes a decimal hour value and returns a string formatted as hms with
separator specified by the 'sep' parameter.

TODO: More detailed description here!
625941b75166f23b2e1a4f9e
def pytest_itemcollected(item):
    try:
        pathlib.Path(item.fspath.strpath).resolve().relative_to(PYTESTS_DIR)
        if item.cls and issubclass(item.cls, TestCase):
            pytest.fail(
                "The tests under {0!r} MUST NOT use unittest's TestCase class or a"
                " subclass of it. Please move {1!r} outside of {0!r}".format(
                    str(PYTESTS_DIR.relative_to(CODE_DIR)), item.nodeid
                )
            )
    except ValueError:
        if not item.cls or (item.cls and not issubclass(item.cls, TestCase)):
            pytest.fail(
                "The test {!r} appears to be written for pytest but it's not under"
                " {!r}. Please move it there.".format(
                    item.nodeid, str(PYTESTS_DIR.relative_to(CODE_DIR))
                ),
                pytrace=False,  # moved out of str.format(), where it was silently ignored
            )
We just collected a test item.
625941b723e79379d52ee3ad
def talk_m10_04_x83(val2=2000, z5=-2000):
    ClearNpcMenuAction()
    call = talk_m10_04_x34(action5=1203, val2=val2)
    if call.Get() == 1:
        pass
    elif call.Get() == 0 and (CurrentSouls() > val2) != 1:
        assert talk_m10_04_x19(action8=1016)
    elif call.Get() == 0:
        PlayerActionRequest(9)
        assert PlayerIsInEventAction(9) != 0
        AddSouls(z5)
        SetEventFlag(104020098, 1)
        SetAreaVariable(57, 200)
        assert (GetStateTime() > GetRandomValueForStateTime(2, 2)) != 0
        SetEventFlag(104020156, 1)
        assert GetEventFlag(104020156) != 0
        assert GetEventFlag(104020156) != 1
        SetAreaVariable(57, 1)
        EndPlayerActionRequest()
        assert talk_m10_04_x1(text1=76901200, z45=0, z60=-1, z61=0)
    elif call.Get() == 2:
        pass
    """State 2: Route change: End"""
    ClearNpcMenuSelection()
    return 0

Miracle Person: Menu Item: Route Change
val2: Soul amount
z5: Subtract soul amount
625941b7eab8aa0e5d26d9a4
def on_soundfiles_added(self, mediadir, files):
    self.soundobjects = {}
    for soundf in files:
        hsound = self.load_sound(os.path.join(mediadir, soundf))
        self.soundobjects[soundf] = hsound
Load list of soundfiles to RAM.
625941b7b57a9660fec336c5
def get_write(self):
    self.conn = self.getconn(self.dburl)
    self.c = self.conn.cursor()
Sets the cursor for the class
625941b7097d151d1a222ca2
def getter(self, fget=None):
    self._fget = self._convert_to_classmethod(fget)
    return self

Accessor decorator to change the getter on a classproperty
625941b77b180e01f3dc464c
def set_nwave(self, nwave=None):
    if nwave is None:
        nwave = int(self._ui.lineEdit_nwl.text())
    else:
        self._ui.lineEdit_nwl.setText(str(nwave))
    self._core.n_wave = nwave
directly update the core parameters
625941b730dc7b76659017b0
def db_connect():
    return create_engine(URL(**settings.DATABASE))

Performs db connection using database settings from settings.py.
Returns SQLAlchemy (our ORM) engine instance.
625941b71b99ca400220a8f6
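A self-contained sketch of what this relies on. The record does not show settings.py, so the dict below is an assumed shape of settings.DATABASE (the key names are SQLAlchemy URL components, not confirmed by the record):

from sqlalchemy import create_engine
from sqlalchemy.engine.url import URL

DATABASE = {           # assumed shape of settings.DATABASE
    'drivername': 'postgresql',
    'host': 'localhost',
    'port': '5432',
    'username': 'scrapy',
    'password': 'secret',
    'database': 'items',
}
engine = create_engine(URL(**DATABASE))  # on SQLAlchemy 1.4+, use URL.create(**DATABASE)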
def _prepare_dmi(self, card, card_obj, comment=''):
    self._prepare_dmix(DMI, self._add_dmi_object, card_obj, comment=comment)
adds a DMI
625941b7ac7a0e7691ed3f1f
def load_sample(sample_dir):
    print('loading sample dataset..')
    lfilenames = []
    labelsnames = []
    for (dirpath, dirnames, filenames) in os.walk(sample_dir):
        for filename in filenames:
            filename_path = os.sep.join([dirpath, filename])
            lfilenames.append(filename_path)
            labelsnames.append(dirpath.split('\\')[-1])
    lab = list(sorted(set(labelsnames)))
    labdict = dict(zip(lab, list(range(len(lab)))))
    labels = [labdict[i] for i in labelsnames]
    return shuffle(np.asarray(lfilenames), np.asarray(labels)), np.asarray(lab)

Recursively read files; only one directory level is supported. Returns the
file names, the numeric labels, and the label names the numbers map to.
625941b7a8370b77170526e6
def test_093_qrexec_service_socket_dom0_eof(self):
    self.loop.run_until_complete(self.testvm1.start())
    self.create_local_file(
        '/tmp/service_script',
        '#!/usr/bin/python3\n'
        'import socket, os, sys, time\n'
        's = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n'
        'os.umask(0)\n'
        's.bind("/etc/qubes-rpc/test.Socket")\n'
        's.listen(1)\n'
        'conn, addr = s.accept()\n'
        'buf = conn.recv(100)\n'
        'sys.stdout.buffer.write(buf)\n'
        'buf = conn.recv(10)\n'
        'sys.stdout.buffer.write(buf)\n'
        'sys.stdout.buffer.flush()\n'
        'os.close(1)\n'
        'time.sleep(15)\n'
    )
    self.service_proc = self.loop.run_until_complete(
        asyncio.create_subprocess_shell('python3 /tmp/service_script',
                                        stdout=subprocess.PIPE,
                                        stdin=subprocess.PIPE))
    try:
        with self.qrexec_policy('test.Socket', self.testvm1, '@adminvm'):
            p = self.loop.run_until_complete(self.testvm1.run(
                'qrexec-client-vm @adminvm test.Socket',
                stdin=subprocess.PIPE))
            p.stdin.write(b'test1test2')
            p.stdin.write_eof()
            service_stdout = self.loop.run_until_complete(asyncio.wait_for(
                self.service_proc.stdout.read(), timeout=10))
    except asyncio.TimeoutError:
        self.fail(
            "service timeout, probably EOF wasn't transferred from the VM process")
    finally:
        with contextlib.suppress(ProcessLookupError):
            p.terminate()
        self.loop.run_until_complete(p.wait())
    service_descriptor = b'test.Socket+ test-inst-vm1 keyword adminvm\0'
    self.assertEqual(service_stdout, service_descriptor + b'test1test2',
                     'Received data differs from what was expected')
Test for EOF transmission VM->dom0(socket)
625941b74d74a7450ccd4008
def print_dict(p_func, d):
    for k, v in sorted(d.items()):
        p_func("{} = {}".format(k, v))

Pretty print a dictionary

:param p_func: printer to use (print or logging)
:param d: the dictionary to print
:type d: dict
625941b70fa83653e4656e03
def _create_service_instance(self, context, device_id,
                             service_instance_param, managed_by_user):
    name = service_instance_param['name']
    service_type_id = service_instance_param['service_type_id']
    service_table_id = service_instance_param['service_table_id']
    mgmt_driver = service_instance_param.get('mgmt_driver')
    mgmt_url = service_instance_param.get('mgmt_url')
    service_instance_id = str(uuid.uuid4())
    LOG.debug('service_instance_id %s device_id %s',
              service_instance_id, device_id)
    with context.session.begin(subtransactions=True):
        device_db = self._get_resource(context, Device, device_id)
        device_dict = self._make_device_dict(device_db)
        tenant_id = self._get_tenant_id_for_create(context, device_dict)
        instance_db = ServiceInstance(
            id=service_instance_id,
            tenant_id=tenant_id,
            name=name,
            service_type_id=service_type_id,
            service_table_id=service_table_id,
            managed_by_user=managed_by_user,
            status=constants.PENDING_CREATE,
            mgmt_driver=mgmt_driver,
            mgmt_url=mgmt_url)
        context.session.add(instance_db)
        context.session.flush()
        binding_db = ServiceDeviceBinding(
            service_instance_id=service_instance_id, device_id=device_id)
        context.session.add(binding_db)
    return self._make_service_instance_dict(instance_db)

:param service_instance_param: dictionary to create instance of
    ServiceInstance. The following keys are used:
    name, service_type_id, service_table_id, mgmt_driver, mgmt_url.
    mgmt_driver, mgmt_url can be determined later.
625941b7462c4b4f79d1d516
@cli.group()
def data():
    if not jobs.current_job:
        click.echo("No current job selected. Use >> certmailer use <jobname>")
        sys.exit(1)
Manage current job's data sources
625941b77b25080760e392a1
def FreeTerms(u, x):
    if SumQ(u):
        result = 0
        for i in u.args:
            if FreeQ(i, x):
                result += i
        return result
    elif FreeQ(u, x):
        return u
    else:
        return 0

Returns the sum of the terms of u free of x.

Examples
========

>>> from sympy.integrals.rubi.utility_function import FreeTerms
>>> from sympy.abc import x, a, b
>>> FreeTerms(a, x)
a
>>> FreeTerms(x*a, x)
0
>>> FreeTerms(a*x + b, x)
b
625941b72c8b7c6e89b3560a
def _pds_plot_iterator(pds, dim, funcId):
    i = 0
    for (algname, ds) in pds.algds_dimfunc((dim, funcId)):
        yield ('algorithm', algname, ds, _style_algorithm(algname, i))
        i += 1
    yield ('oracle', 'oracle', pds.oracle((dim, funcId)), _style_oracle())
    yield ('unifpf', 'eUNIF', pds.unifpf().dictByDimFunc()[dim][funcId][0], _style_unifpf())
    i = 0
    for (stratname, ds) in pds.stratds_dimfunc((dim, funcId)):
        yield ('strategy', stratname, ds, _style_strategy(stratname, i))
        i += 1

An iterator that will in turn yield all drawable curves in the form of
(kind, name, ds, style) tuples (where kind is one of 'algorithm', 'oracle',
'unifpf', 'strategy').
625941b7a79ad161976cbf8b
def predict(beta, X):
    m = X.shape[0]
    Y_prediction = np.zeros((m, 1))
    w = sigmoid(X * beta)
    for i in range(w.shape[0]):
        if w[i, 0] > 0.5:
            Y_prediction[i, 0] = 1
        else:
            Y_prediction[i, 0] = 0
    return Y_prediction
Predict whether the label is 0 or 1 using learned logistic regression parameters
625941b726238365f5f0ecaf
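A runnable sketch of driving the predict function above. The record does not include sigmoid, so the standard logistic function is assumed, and X is built as an np.matrix so that X * beta is a true matrix product:

import numpy as np

def sigmoid(z):
    # assumed definition: the standard logistic function
    return 1.0 / (1.0 + np.exp(-z))

X = np.matrix([[1.0, 2.0], [1.0, -1.0], [1.0, 0.5]])  # 3 samples, bias column first
beta = np.matrix([[-0.5], [1.0]])                     # learned parameters
# X * beta = [[1.5], [-1.5], [0.0]] -> sigmoid -> [0.82, 0.18, 0.5] -> labels 1, 0, 0
print(predict(beta, X))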
def static_evaluate(self, compiler, expr, arg_list):
    assert isinstance(expr, expression.OperatorExprNode)
    result = compiler.init_node(
        expression.StaticEvaluatedExprNode(), expr.ctx)
    if len(expr.child_argument_list.childs_arguments) == 1:
        rhs = expr.child_argument_list.childs_arguments[0].get_static_value()
        if self.txt_operator == '+':
            result.set_static_value(rhs)
        elif self.txt_operator == '-':
            result.set_static_value(-rhs)
        elif self.txt_operator == '!':
            result.set_static_value(not rhs)
        else:
            assert False
    elif len(expr.child_argument_list.childs_arguments) == 2:
        lhs = expr.child_argument_list.childs_arguments[0].get_static_value()
        rhs = expr.child_argument_list.childs_arguments[1].get_static_value()
        if self.txt_operator == '+':
            result.set_static_value(lhs + rhs)
        elif self.txt_operator == '-':
            result.set_static_value(lhs - rhs)
        elif self.txt_operator == '*':
            result.set_static_value(lhs * rhs)
        elif self.txt_operator == '/':
            result.set_static_value(lhs / rhs)
        elif self.txt_operator == '%':
            result.set_static_value(lhs % rhs)
        elif self.txt_operator == '<':
            result.set_static_value(lhs < rhs)
        elif self.txt_operator == '>':
            result.set_static_value(lhs > rhs)
        elif self.txt_operator == '>=':
            result.set_static_value(lhs >= rhs)
        elif self.txt_operator == '<=':
            result.set_static_value(lhs <= rhs)
        elif self.txt_operator == '==':
            result.set_static_value(lhs == rhs)
        elif self.txt_operator == '!=':
            result.set_static_value(lhs != rhs)
        elif self.txt_operator == '&&':
            result.set_static_value(lhs and rhs)
        elif self.txt_operator == '||':
            result.set_static_value(lhs or rhs)
        else:
            assert False
    else:
        assert False
    result.set_type(self.get_type())
    result.set_replaced(expr)
    return result

Do static evaluation of expressions when possible
625941b7ec188e330fd5a5ec
def read_fam(fam_file):
    fam = pd.read_csv(fam_file, index_col=0)
    fam = fam.astype({'stim_id': int})
    fam.loc[fam['condition'].isin([1, 3]), 'category'] = 'face'
    fam.loc[fam['condition'].isin([2, 4]), 'category'] = 'scene'
    fam.loc[(fam['stim_id'] >= 0) & (fam['stim_id'] < 30), 'subcategory'] = 'female'
    fam.loc[(fam['stim_id'] >= 30) & (fam['stim_id'] < 60), 'subcategory'] = 'male'
    fam.loc[(fam['stim_id'] >= 60) & (fam['stim_id'] < 90), 'subcategory'] = 'manmade'
    fam.loc[(fam['stim_id'] >= 90) & (fam['stim_id'] < 120), 'subcategory'] = 'natural'
    return fam
Read MTurk familiarity stats.
625941b7046cf37aa974cb91
def keltner_channel(df, n):
    KelChM = pd.Series(pd.rolling_mean((df['High'] + df['Low'] + df['Close']) / 3, n),
                       name='KelChM_' + str(n))
    KelChU = pd.Series(pd.rolling_mean((4 * df['High'] - 2 * df['Low'] + df['Close']) / 3, n),
                       name='KelChU_' + str(n))
    KelChD = pd.Series(pd.rolling_mean((-2 * df['High'] + 4 * df['Low'] + df['Close']) / 3, n),
                       name='KelChD_' + str(n))
    df = df.join(KelChM)
    df = df.join(KelChU)
    df = df.join(KelChD)
    return df

Calculate Keltner Channel for given data.

:param df: pandas.DataFrame
:param n:
:return: pandas.DataFrame
625941b7004d5f362079a17d
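The record uses pd.rolling_mean, which was removed from pandas years ago. A minimal sketch of the middle band on modern pandas, assuming the same OHLC column names:

import pandas as pd

def keltner_middle(df: pd.DataFrame, n: int) -> pd.Series:
    # typical price (H+L+C)/3 smoothed with an n-period rolling mean
    typical = (df['High'] + df['Low'] + df['Close']) / 3
    return typical.rolling(n).mean().rename('KelChM_' + str(n))

df = pd.DataFrame({'High': [11.0, 12.0, 13.0, 14.0],
                   'Low': [9.0, 10.0, 11.0, 12.0],
                   'Close': [10.0, 11.0, 12.0, 13.0]})
print(keltner_middle(df, 2))  # NaN, 10.5, 11.5, 12.5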
def imageToWeb(self):
    self.imgStream = io.BytesIO()
    try:
        self.profileImage.save(self.imgStream, "PNG")
    except Exception:  # broad on purpose: any save failure means no image
        return False
    self.imgStream.seek(0)
    return self.imgStream.read()

Will attempt to save the generated PIL image object as a PNG stream so it
can be included in web output.
625941b73539df3088e2e191
def get_views(self):
    Rs = []
    points = self.get_points()
    for i in range(points.shape[0]):
        longitude = -math.atan2(points[i, 0], points[i, 1])
        latitude = math.atan2(points[i, 2],
                              math.sqrt(points[i, 0] ** 2 + points[i, 1] ** 2))
        R_x = np.array([[1, 0, 0],
                        [0, math.cos(latitude), -math.sin(latitude)],
                        [0, math.sin(latitude), math.cos(latitude)]])
        R_y = np.array([[math.cos(longitude), 0, math.sin(longitude)],
                        [0, 1, 0],
                        [-math.sin(longitude), 0, math.cos(longitude)]])
        R = R_y.dot(R_x)
        Rs.append(R)
    return Rs

Generate a set of views to generate depth maps from.

:param n_views: number of views per axis
:type n_views: int
:return: rotation matrices
:rtype: [numpy.ndarray]
625941b723849d37ff7b2ed8
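A standalone worked example of the per-point math above for a single unit-sphere point, checking that the composed matrix is a proper rotation:

import math
import numpy as np

point = np.array([1.0, 1.0, 1.0]) / math.sqrt(3)   # one unit-sphere point
longitude = -math.atan2(point[0], point[1])
latitude = math.atan2(point[2], math.sqrt(point[0] ** 2 + point[1] ** 2))
R_x = np.array([[1, 0, 0],
                [0, math.cos(latitude), -math.sin(latitude)],
                [0, math.sin(latitude), math.cos(latitude)]])
R_y = np.array([[math.cos(longitude), 0, math.sin(longitude)],
                [0, 1, 0],
                [-math.sin(longitude), 0, math.cos(longitude)]])
R = R_y.dot(R_x)
print(np.allclose(R.dot(R.T), np.eye(3)))  # True: orthonormal, i.e. a rotation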
def run_tests(self, module):
    m = [[1, 2, 3], [4, 5, 6]]
    result = module.sum_and_fill(m, -1)
    self.failUnless(21 == result[0])
    self.failUnless([[-1, -2, -3], [-4, -5, -6]] == result[1])
Run the actual unit tests
625941b74e4d5625662d4223
def result_for_handle(self, handle):
    if isinstance(handle, str):
        handle = NodeHandle.from_string(handle)
    else:
        check.inst_param(handle, "handle", NodeHandle)
    solid = self.container.get_solid(handle)
    return self._result_for_handle(solid, handle)

Get the result of a solid by its solid handle. This allows indexing into
top-level solids to retrieve the results of children of composite solids.

Args:
    handle (Union[str, NodeHandle]): The handle for the solid.

Returns:
    Union[CompositeSolidExecutionResult, SolidExecutionResult]: The result
    of the given solid.
625941b7167d2b6e312189e4
def test_publish_to_lms(self):
    course = CourseFactory()
    with mock.patch.object(LMSPublisher, 'publish') as mock_publish:
        course.publish_to_lms()
        self.assertTrue(mock_publish.called)
Verify the method publishes data to LMS.
625941b71b99ca400220a8f7
def test_post_ao_image(self):
    pass

Test case for post_ao_image

Stores the AO Image  # noqa: E501
625941b7be383301e01b52d3
def row_count(self, parent=None):
    msg = 'This method must be implemented by a subclass'
    raise NotImplementedError(msg)

Count the number of rows in the children of an item.

Arguments
---------
parent : ModelIndex or None
    A model item with children.

Returns
-------
count : int
    The number of rows in the model.
625941b7dd821e528d63aff1
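A minimal sketch of the intended override pattern; the list-backed model below is hypothetical, not part of the record:

class ListModel:  # hypothetical concrete model; the record only shows the abstract method
    def __init__(self, rows):
        self._rows = rows

    def row_count(self, parent=None):
        # every item is a child of the root in this flat model
        return len(self._rows)

model = ListModel(['a', 'b', 'c'])
print(model.row_count())  # 3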
def compute_cycle_stats_fast(cycle_df, filename):
    cycle_stats = pd.DataFrame(
        index=range(len(np.unique(cycle_df['user_id']))),
        columns=['user_id', 'cycle_lengths', 'period_lengths', 'inter_cycle_lengths'])
    cycle_stats['user_id'] = np.unique(cycle_df['user_id'])
    for index, user_id in enumerate(np.unique(cycle_df['user_id'])):
        cycle_df_for_user = cycle_df[cycle_df['user_id'] == user_id]
        cycle_lengths_for_user = np.array(cycle_df_for_user['cycle_length'])
        period_lengths_for_user = np.array(cycle_df_for_user['period_length'])
        inter_cycle_lengths_for_user = np.abs(
            cycle_lengths_for_user[:-1] - cycle_lengths_for_user[1:])
        cycle_stats.at[index, ['cycle_lengths', 'period_lengths', 'inter_cycle_lengths']] = [
            cycle_lengths_for_user, period_lengths_for_user, inter_cycle_lengths_for_user]
        print(index)
    num_cycles_tracked_per_user = np.array(cycle_df.groupby('user_id')['cycle_length'].count())
    cycle_stats['num_cycles_tracked'] = num_cycles_tracked_per_user
    avg_cycle_lengths = np.array(cycle_df.groupby('user_id')['cycle_length'].mean())
    cycle_stats['avg_cycle_length'] = avg_cycle_lengths
    var_cycle_lengths = np.array(cycle_df.groupby('user_id')['cycle_length'].var())
    cycle_stats['var_cycle_length'] = var_cycle_lengths
    cycle_stats['std_cycle_length'] = np.sqrt(var_cycle_lengths)
    cycle_stats['max_cycle_length'] = [np.max(cycle_stats.iloc[i]['cycle_lengths'])
                                       for i in range(len(cycle_stats))]
    cycle_stats['max_period_length'] = [np.max(cycle_stats.iloc[i]['period_lengths'])
                                        for i in range(len(cycle_stats))]
    cycle_stats['median_inter_cycle_length'] = [np.median(cycle_stats.iloc[i]['inter_cycle_lengths'])
                                                for i in range(len(cycle_stats))]
    cycle_stats['max_inter_cycle_length'] = [np.max(cycle_stats.iloc[i]['inter_cycle_lengths'])
                                             for i in range(len(cycle_stats))]
    with open(filename, 'wb') as f:
        pickle.dump(cycle_stats, f)
    print(cycle_stats.iloc[0])
    return cycle_stats

Compute cycle stats for desired cycle_df, save under filename; stats include
cycle and period lengths, intercycle lengths (CLDs), and summary stats
(mean, variance, standard deviation, max, median).

Input:
    cycle_df (pandas dataframe): dataframe of user cycles, indexed by user ID and cycle ID
    filename (string): desired filename for cycle stats dataframe
Output:
    cycle_stats (pandas dataframe): cycle stats dataframe computed from input cycle dataframe
625941b74527f215b584c2a1
def __imul__(self, *args):
    return _vnl_vectorPython.vnl_vectorD___imul__(self, *args)

__imul__(self, double arg0) -> vnl_vectorD
__imul__(self, vnl_matrixD m) -> vnl_vectorD
625941b79f2886367277a6d7
@REQUEST_TIME.time()
def process_request(t):
    time.sleep(t)
    print("Something going on")
A dummy function that takes some time.
625941b73346ee7daa2b2baf
def reset(self):
    self.start = 0
    self.end = min(self.start + self._batch_size, self._total_size)
    if self._random_batches:
        self.ordering = self._rng.permutation(self._total_size)
        self.value = self.ordering[self.start:self.end]
    else:
        self.value = slice(self.start, self.end)
    for node in self._dropout_nodes:
        node.draw_new_mask()

Reset the state of the Kayak Batcher.

It may happen that you want to 'reset the loop' and restart your iteration
over the data. Calling this method does that. If, in the constructor, you
set rng=None, then you'll go back to zero. If random_batches is true, you
will get a new random permutation when you reset.

This method is automatically called when the iterator completes its loop,
so you don't need to explicitly call it when you're making multiple loops
over the data.

Arguments: None
625941b732920d7e50b28012
def precis_engine_tasks_get(self, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.precis_engine_tasks_get_with_http_info(**kwargs)
    else:
        (data) = self.precis_engine_tasks_get_with_http_info(**kwargs)
        return data

Get all precisEngineTasks  # noqa: E501

Get all engineTasks details  # noqa: E501

This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True

>>> thread = api.precis_engine_tasks_get(async_req=True)
>>> result = thread.get()

:param async_req bool
:param str where: The where clause takes a JSON as a string with one or many
    properties of the engineTask model. Example:
    * To find precisEngineTasks with capbilitiesType equal 211 and sourceId equal "4",
      use /precisEngineTasks?where={"capbilitiesType":322,"source.sourceId":"4"}
    * To find precisEngineTasks with destination.extras.value equal "1553774721506487",
      use /precisEngineTasks?where={"destination.extras.value":"1553774721506487"}
:param int page: The page clause takes the page number you want to query. Example:
    * To find engine tasks at page no 4, use /engines?page=4
:param str sort: The sort query parameter sorts the result set in ascending and
    descending order by one of the properties of the result set. Example:
    * To sort engineTasks by created IN ASCENDING order, use /precisEngineTasks?sort=created
    * To sort engineTasks by created IN DESCENDING order, use /precisEngineTasks?sort=-created
    * Please note the - (minus) sign in front of created, which indicates the inverse of ASCENDING
:param int max_results: The maxResults query parameter limits results equal to # of maxResults. Example:
    * To get the latest engineTask among all engineTasks, use /precisEngineTasks?maxResults=1
    * To limit engineTasks to 2, use /precisEngineTasks?maxResults=2
:param str embedded: The embedded clause takes a JSON as a string with a sourceEndPoint argument. Example:
    * To find engineTasks with the sourceEndPoint object, use /precisEngineTasks?embedded={"sourceEndPoint":1}
:return: PrecisEngineTasksResponse
    If the method is called asynchronously, returns the request thread.
625941b7dd821e528d63aff2
def get_kilink(self, kid, revno):
    session = self.sm.get_session()
    try:
        klnk = session.query(Kilink).filter_by(kid=kid, revno=revno).one()
    except NoResultFound:
        msg = "Data not found for kilink=%r revno=%r" % (kid, revno)
        raise KilinkNotFoundError(msg)
    return klnk
Get a specific kilink and revision number.
625941b76aa9bd52df036be8
def isAnagram(self, s, t):
    if len(s) != len(t):
        return False
    m1 = collections.Counter(s)
    m2 = collections.Counter(t)
    for k in m1.keys():
        if m1[k] != m2[k]:
            return False
    return True

:type s: str
:type t: str
:rtype: bool
625941b724f1403a926009b0
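The key-by-key loop above is equivalent to comparing the two Counters directly, since the length check already rules out extra keys in t. A compact, runnable restatement (the Solution wrapper is the usual LeetCode scaffolding, assumed here):

import collections

class Solution:
    def isAnagram(self, s, t):
        # same length plus matching per-character counts
        return len(s) == len(t) and collections.Counter(s) == collections.Counter(t)

print(Solution().isAnagram("anagram", "nagaram"))  # True
print(Solution().isAnagram("rat", "car"))          # False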
def handle(self):
    dlg_handle_list = application.findwindows.find_windows(
        self._class_name,
        self._class_name_re,
        self._parent,
        self._process,
        self._title,
        self._title_re,
        self._top_level_only,
        self._visible_only,
        self._enabled_only,
        self._best_match,
        self._handle,
        self._ctrl_index,
        self._predicate_func,
        self._active_only
    )
    if len(dlg_handle_list) == 0:
        return False
    handle_id = dlg_handle_list[0]
    if not handleprops.iswindow(handle_id):
        return False
    hwnd = controls.HwndWrapper.HwndWrapper(handle_id)
    child_ctrls_list = hwnd.Children()
    if len(self._static_text) > 0:
        found = False
        for child_obj in child_ctrls_list:
            if child_obj.Class() == "Static" and self._static_text in child_obj.WindowText():
                found = True
                break
        if not found:
            return False
    self._set_properties(handle_id)
    if len(self._button_name) > 0:
        for child_obj in child_ctrls_list:
            if child_obj.Class() == "Button" and self._button_name == child_obj.WindowText():
                child_obj.Click()
                return True
    if self._key_string:
        hwnd.TypeKeys(self._key_string)
        return True
    if len(self._coords_list):
        for coords in self._coords_list:
            hwnd.ClickInput(coords=coords)
    hwnd.Close()
    return False

Find the dialog with the specified title and text, then click the specified
button or send the key string to it to close it.
625941b792d797404e303fd1
def process_photos(rpis, camera):
    logger.info("thread running")
    while True:
        if not camera.queue.empty():
            if rpis.state.current == 'armed':
                logger.debug('Running arp_ping_macs before sending photos...')
                rpis.arp_ping_macs()
                time.sleep(2)
                while True:
                    if rpis.state.current != 'armed':
                        camera.clear_queue()
                        break
                    photo = camera.queue.get()
                    if photo is None:
                        break
                    logger.debug('Processing the photo: {0}'.format(photo))
                    rpis.state.update_triggered(True)
                    rpis.telegram_send_message('Motioned detected')
                    if rpis.telegram_send_file(photo):
                        camera.queue.task_done()
            else:
                logger.debug('Stopping photo processing as state is now {0} and clearing queue'.format(rpis.state.current))
                camera.queue.queue.clear()
        time.sleep(0.1)

Monitors the captured_from_camera list for newly captured photos. When new
photos are present it will run arp_ping_macs to remove false positives and
then send the photos via Telegram. After successfully sending a photo it
will also archive it and remove it from the list.
625941b716aa5153ce3622be
def process_song_data(spark, input_data, output_data):
    song_data = os.path.join(input_data, "song_data/*/*/*")
    df = spark.read.json(song_data)
    songs_table = df.select("song_id", "title", "artist_id", "year", "duration")
    songs_table.write.mode("overwrite").partitionBy("year", "artist_id").parquet(
        os.path.join(output_data, "songs"))
    artists_table = df.select("artist_id", "artist_name", "artist_location",
                              "artist_latitude", "artist_longitude")
    artists_table.write.mode("overwrite").parquet(os.path.join(output_data, "artists"))

Description: Process the songs data files and extract songs table and
artist table data from them.

:param spark: a spark session instance
:param input_data: input file path
:param output_data: output file path
625941b760cbc95b062c638f
def open_from_file(self, path):
    with open(path) as f:  # close the file instead of leaking the handle
        self.__html = f.read()
open file and get html
625941b7e8904600ed9f1d6f
def run(self):
    result, loss = self._run()
    if self._convergence_loss:
        return result, loss
    return result, None

run the program

Returns
-------
result : np.ndarray, shape (num of items, num of time step + 1)
    prediction result, including the original series
loss : list[float] if self.convergence_loss == True else None
    Convergence loss
625941b74c3428357757c171
def test_missing_header_value(self):
    with open("tests/testdata-missing-header-values.cef", "r") as f:
        for l in f.readlines():
            d = pycef.parse(l)
            self.assertIsNone(d, "Tried to parse a record with one or more missing CEF header values.")
Test that we don't try to incorrectly parse a record where one or more of the CEF header values are missing.
625941b726238365f5f0ecb0
def restrict_project_kind(*kinds):
    def decorator(func):
        def decorated(*args, **kwargs):
            project = kwargs['slug']
            project = get_object_or_404(Project, slug=project)
            if not project.category in kinds:
                msg = _("This page is not accesible on a %s.")
                return HttpResponseForbidden(msg % project.kind.lower())
            return func(*args, **kwargs)
        return decorated
    return decorator

Do not allow access to this view if the project kind is not one of the
given kinds.
625941b7d8ef3951e3243384
def _parse_singlefile(self, desired_type: Type[T], file_path: str, encoding: str,
                      logger: Logger, options: Dict[str, Dict[str, Any]]) -> T:
    raise Exception('Not implemented since this is a MultiFileParser')

Implementation of the parent method: since this is a multifile parser,
this is not implemented.

:param desired_type:
:param file_path:
:param encoding:
:param logger:
:param options:
:return:
625941b7ab23a570cc24ffc6
def run_tests(self, config=None):
    if self.pre_test_hook:
        if self.pre_test_hook() is False:
            return
    if config is None:
        config = self.config
    pythonpath = self.pythonpath
    self.testdatamodel.testresults = []
    self.testdetails = []
    tempfilename = get_conf_path('unittest.results')
    self.testrunner = self.framework_registry.create_runner(
        config.framework, self, tempfilename)
    self.testrunner.sig_finished.connect(self.process_finished)
    self.testrunner.sig_collected.connect(self.tests_collected)
    self.testrunner.sig_collecterror.connect(self.tests_collect_error)
    self.testrunner.sig_starttest.connect(self.tests_started)
    self.testrunner.sig_testresult.connect(self.tests_yield_result)
    self.testrunner.sig_stop.connect(self.tests_stopped)
    try:
        self.testrunner.start(config, pythonpath)
    except RuntimeError:
        QMessageBox.critical(self, _("Error"), _("Process failed to start"))
    else:
        self.set_running_state(True)
        self.set_status_label(_('Running tests ...'))

Run unit tests.

First, run `self.pre_test_hook` if it is set, and abort if its return
value is `False`. Then, run the unit tests.

The process's output is consumed by `read_output()`. When the process
finishes, the `finish` signal is emitted.

Parameters
----------
config : Config or None
    configuration for unit tests. If None, use `self.config`. In either
    case, configuration should be valid.
625941b773bcbd0ca4b2bec4
def ex_node_attach_disk(self, node, disk):
    op = self.connection.request('hosting.vm.disk_attach',
                                 int(node.id), int(disk.id))
    if self._wait_operation(op.object['id']):
        return True
    return False

Specific method to attach a disk to a node

@param node: Node which should be used
@type node: L{Node}

@param disk: Disk which should be used
@type disk: L{GandiDisk}

@rtype: C{bool}
625941b78e05c05ec3eea1b8
def upgrade(self):
    if self.applied():
        return
    queries = ['DROP PROCEDURE IF EXISTS `get_organization_id`']
    return self.manager.db_upgrade(queries)
Runs the upgrade queries
625941b7bf627c535bc1301d
def DoGetBestSize(self):
    return wx.Size(200, 150)
Returns the best size for L{PeakMeterCtrl} (arbitrary).
625941b79f2886367277a6d8
def run():
    args = parse_args()
    assert args.batch_size % args.gpu_num == 0
    assert args.gru_hidden_size % 2 == 0
    logger = logging.getLogger("GACM")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(message)s')
    check_path(args.save_dir)
    check_path(args.load_dir)
    check_path(args.result_dir)
    check_path(args.summary_dir)
    if args.log_dir:
        check_path(args.log_dir)
        file_handler = logging.FileHandler(
            args.log_dir + time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime(time.time())) + '.txt')
        file_handler.setLevel(logging.INFO)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    else:
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.INFO)
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)
    logger.info('Running with args : {}'.format(args))
    logger.info('Checking the directories...')
    for dir_path in [args.save_dir, args.result_dir, args.summary_dir]:
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
    global Dataset
    global Agent
    logger.info('Agent version: {}.0'.format(args.agent_version))
    logger.info('Dataset version: {}.0'.format(args.dataset_version))
    logger.info('Checking the directories...')
    Dataset = importlib.import_module('dataset{}'.format(args.dataset_version)).Dataset
    Agent = importlib.import_module('Agent{}'.format(args.agent_version)).Agent
    if args.pretrain:
        pretrain(args)
    if args.train:
        train(args)
    if args.test:
        test(args)
    if args.rank:
        rank(args)
    if args.generate_synthetic_dataset:
        generate_synthetic_dataset(args)
    logger.info('run done.')
Prepares and runs the whole system.
625941b7a934411ee37514e2
def entity_oblpn_by_id_get(self, id, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.entity_oblpn_by_id_get_with_http_info(id, **kwargs)
    else:
        (data) = self.entity_oblpn_by_id_get_with_http_info(id, **kwargs)
        return data

EntityOblpnById_GET  # noqa: E501

This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True

>>> thread = api.entity_oblpn_by_id_get(id, async_req=True)
>>> result = thread.get()

:param async_req bool
:param float id: (required)
:param str fields:
:return: object
    If the method is called asynchronously, returns the request thread.
625941b77d43ff24873a2aea
def InjectSignal(self, signal):
    assert isinstance(signal, Signal)
    self.signals.append(signal)

Method that injects a signal.

Parameters
----------
signal : Signal or its subclass

Returns
-------
None

Raises
------
AssertionError
    The injected parameter is not a Signal or a subclass of Signal.

See Also
--------
.Signal : for Injecting Signal (but it is null, pure-class)
.GaussianSignal : Example (not-null) signal
625941b721bff66bcd68479c
def containsConsonants(self, selList, k, mode):
    matches = self.matchConsonants(selList, k, mode)
    return compareByMode(len(matches), k, mode)

Returns true if exactly* k of the consonants in selList appear in this
language. Use mode (less than, etc.) instead of exact equality checking.
625941b75fcc89381b1e150b
def fetch_all_versions(self) -> Iterable[Tuple["PackageVersion", bool]]:
    resp = requests.get("https://pypi.org/pypi/%s/json" % self.name)
    resp.raise_for_status()
    pv_and_cs = []
    releases = resp.json()["releases"]
    for release in releases:
        pv_and_cs.append(
            PackageVersion.objects.get_or_create(package=self, version=release)
        )
    return pv_and_cs

Checks PyPI for the latest version, then get_or_create-s an appropriate
PackageVersion
625941b7cc40096d6159579a
def linear(self) -> int:
    return 1 if (self.power == 1) and (self.factorial == 0) else 0
Returns 1 if self only has linear properties
625941b7187af65679ca4f64
def style_range(sheet, cell_range, border=Border(), fill=None, font=None, alignment=None):
    top = Border(top=border.top)
    left = Border(left=border.left)
    right = Border(right=border.right)
    bottom = Border(bottom=border.bottom)
    first_cell = sheet[cell_range.split(":")[0]]
    if alignment:
        sheet.merge_cells(cell_range)
        first_cell.alignment = alignment
    rows = sheet[cell_range]
    if font:
        first_cell.font = font
    for cell in rows[0]:
        cell.border = cell.border + top
    for cell in rows[-1]:
        cell.border = cell.border + bottom
    for row in rows:
        l = row[0]
        r = row[-1]
        l.border = l.border + left
        r.border = r.border + right
        if fill:
            for c in row:
                c.fill = fill

Apply styles to a range of cells as if they were a single cell.

:param sheet: Excel worksheet instance
:param cell_range: An excel range to style (e.g. A1:F20)
:param border: An openpyxl Border
:param fill: An openpyxl PatternFill or GradientFill
:param font: An openpyxl Font object
625941b7e64d504609d74687
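A runnable openpyxl usage sketch for the style_range function above; the sheet, range, and output filename are arbitrary:

from openpyxl import Workbook
from openpyxl.styles import Alignment, Border, Side

wb = Workbook()
ws = wb.active
thin = Side(border_style='thin', color='000000')
box = Border(top=thin, left=thin, right=thin, bottom=thin)
center = Alignment(horizontal='center', vertical='center')
# draw one outlined, merged, centered block over B2:F4
style_range(ws, 'B2:F4', border=box, alignment=center)
wb.save('styled.xlsx')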
def _get_full_repr_dict(self):
    ans = OrderedDict()
    ans['info'] = {'__class__': str(self.__class__)}
    ans['userdata'] = ''
    ans['plotdata'] = ''
    return ans

People can overload get_repr without having to call super, and just get
the standard dict format from this method.
625941b766656f66f7cbbff1
def enable_depth_peeling(self):
    self._renderer.enable_depth_peeling()
Enable depth peeling.
625941b7566aa707497f43c1
def setData():
    global x_data
    global y_data
    global fCoefficients
    global fPrimeCoefficients
    x_path = join('..', 'Data', 'CrimeXData.bin')
    y_path = join('..', 'Data', 'CrimeYData.bin')
    c_path = join('..', 'Data', 'Coefficients.bin')
    cp_path = join('..', 'Data', 'FPrimeCoefficients.bin')
    with open(c_path, 'rb') as file:
        fCoefficients = pickle.load(file)
    with open(cp_path, 'rb') as file:
        fPrimeCoefficients = pickle.load(file)
    with open(x_path, 'rb') as file:
        x_data = pickle.load(file)
    with open(y_path, 'rb') as file:
        y_data = pickle.load(file)
Read all data from binary files to be plotted.
625941b7fbf16365ca6f6003
def run(modules, presets=[], project_directory='.', options=None):
    if options is None:
        options = TreeDict()
    else:
        if type(options) is not TreeDict:
            raise TypeError("options parameter needs to be a TreeDict.")
    options.project_directory = project_directory
    m = RunManager(options)
    return m.getResults(modules, presets)
Convenience function for running things directly. `options`, if given, should be a TreeDict of configuration options.
625941b7e1aae11d1e749afb
def enqueue(self, e):
    newest = self._node(e, None)
    if self.is_empty():
        newest._next = newest
    else:
        newest._next = self._tail._next
        self._tail._next = newest
    self._tail = newest
    self._size += 1
Add element to the back of the queue
625941b7dc8b845886cb537b
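The record shows only enqueue. A minimal self-contained circular queue it could live in, assuming _node stores an element plus a next pointer and _tail tracks the last node; the dequeue here is an assumed counterpart, not part of the record:

class CircularQueue:
    class _node:
        __slots__ = '_element', '_next'
        def __init__(self, element, nxt):
            self._element = element
            self._next = nxt

    def __init__(self):
        self._tail = None
        self._size = 0

    def is_empty(self):
        return self._size == 0

    def enqueue(self, e):
        newest = self._node(e, None)
        if self.is_empty():
            newest._next = newest            # single node points to itself
        else:
            newest._next = self._tail._next  # new node points to the head
            self._tail._next = newest        # old tail points to the new node
        self._tail = newest
        self._size += 1

    def dequeue(self):
        if self.is_empty():
            raise IndexError('queue is empty')
        head = self._tail._next
        if self._size == 1:
            self._tail = None
        else:
            self._tail._next = head._next
        self._size -= 1
        return head._element

q = CircularQueue()
for x in (1, 2, 3):
    q.enqueue(x)
print(q.dequeue(), q.dequeue())  # 1 2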
def sumOfLeftLeaves(self, root):
    sum_ = 0
    stack = []
    while root or stack:
        if root:
            if root.left and not root.left.left and not root.left.right:
                sum_ += root.left.val
                root = root.right
            else:
                stack.append(root.right)
                root = root.left
        else:
            root = stack.pop()
    return sum_

:type root: TreeNode
:rtype: int
625941b726068e7796caeb1f
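A quick check of the traversal above, assuming the method sits on the usual LeetCode Solution class; the TreeNode shape below is the conventional one and is not part of the record:

class TreeNode:  # assumed node shape
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

# tree: 3 -> (9, 20), 20 -> (15, 7); left leaves are 9 and 15
root = TreeNode(3, TreeNode(9), TreeNode(20, TreeNode(15), TreeNode(7)))
print(Solution().sumOfLeftLeaves(root))  # 24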
def verify(self, public_key: PublicKey) -> bool:
    return self.crypto.is_valid_signature(public_key, self.get_plaintext(), self.signature)

Verify if a public key belongs to this object.

:returns: whether the given public key has signed for this object.
625941b7a17c0f6771cbde9b
def remove_this_clink(self, clink_id):
    for clink in self.get_clinks():
        if clink.get_id() == clink_id:
            self.node.remove(clink.get_node())
            break

Removes the clink for the given clink identifier

@type clink_id: string
@param clink_id: the clink identifier to be removed
625941b7adb09d7d5db6c5da
def expand(self, X):
    Xexp = []
    for i in range(X.shape[1]):
        for k in np.arange(0, self.max[i] + self.step, self.step):
            Xexp += [np.tanh((X[:, i] - k) / self.step)]
    return np.array(Xexp).T

Binarize features.

Parameters
----------
X: np.ndarray
    Features

Returns
-------
X: np.ndarray
    Binarized features
625941b763f4b57ef0000f69
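What the expansion does for a single feature, as a standalone sketch; max and step are instance attributes in the record, here they are plain values:

import numpy as np

X = np.array([[0.0], [1.0], [2.0]])  # one feature, three samples
step, max_ = 1.0, 2.0
# one soft threshold (tanh bump) per k in {0, 1, 2}
Xexp = np.array([np.tanh((X[:, 0] - k) / step)
                 for k in np.arange(0, max_ + step, step)]).T
print(Xexp.shape)  # (3, 3): one column per threshold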
def test_read_form_input_entries_normal_user_fail_no_permission(
    client: TestClient, db: Session
) -> None:
    form_input = crud.form_input.get_by_template_table_name(
        db, table_name="form_input_test_table")
    user = create_random_user(db)
    [create_random_form_input_table_entry(db) for i in range(10)]
    user_token_headers = authentication_token_from_email(
        client=client, email=user.email, db=db)
    response = client.get(
        f"{settings.API_V1_STR}/interfaces/form-inputs/{form_input.id}/entries/",
        headers=user_token_headers)
    content = response.json()
    assert response.status_code == 403
    assert content["detail"] == (
        f"User ID {user.id} does not have read permissions for "
        f"interface ID {form_input.id}")
Fail if the user doesn't have read permission on the interface
625941b7be7bc26dc91cd44d
def visit_list(self, ctx_list):
    return [self.visit(ctx) for ctx in ctx_list]

Visit a list of contexts.
625941b80fa83653e4656e04
@array_function_dispatch(_split_dispatcher)
def rsplit(a, sep=None, maxsplit=None):
    return _vec_string(
        a, object_, 'rsplit', [sep] + _clean_args(maxsplit))

For each element in `a`, return a list of the words in the string, using
`sep` as the delimiter string.

Calls `str.rsplit` element-wise.

Except for splitting from the right, `rsplit` behaves like `split`.

Parameters
----------
a : array_like of str or unicode

sep : str or unicode, optional
    If `sep` is not specified or `None`, any whitespace string is a
    separator.

maxsplit : int, optional
    If `maxsplit` is given, at most `maxsplit` splits are done, the
    rightmost ones.

Returns
-------
out : ndarray
    Array of list objects

See also
--------
str.rsplit, split
625941b8796e427e537b040a
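A quick demonstration through the public np.char.rsplit wrapper:

import numpy as np

a = np.array(['a.b.c', 'x.y'])
print(np.char.rsplit(a, sep='.', maxsplit=1))
# [list(['a.b', 'c']) list(['x', 'y'])]  (splits happen from the right)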
def xedit_zwXDat(df):
    df = df.rename(columns={'Open': 'open', 'High': 'high', 'Low': 'low',
                            'Close': 'close', 'Volume': 'volume'})
    df.sort_index(ascending=True, inplace=True)
    dx = df['open']
    df['xdiff'] = df['high'] - df['low']
    df['z-xdiff'] = (df['xdiff'] * 1000 / dx).round(0)
    df['z-open'] = (df['open'] * 1000 / dx.shift(1)).round(0)
    df['z-high'] = (df['high'] * 1000 / dx).round(0)
    df['z-low'] = (df['low'] * 1000 / dx).round(0)
    df['z-close'] = (df['close'] * 1000 / dx).round(0)
    df['ma5'] = pd.rolling_mean(df['close'], window=5)
    df['ma10'] = pd.rolling_mean(df['close'], window=10)
    df['ma20'] = pd.rolling_mean(df['close'], window=20)
    df['ma30'] = pd.rolling_mean(df['close'], window=30)
    df['v-ma5'] = pd.rolling_mean(df['volume'], window=5)
    df['v-ma10'] = pd.rolling_mean(df['volume'], window=10)
    df['v-ma20'] = pd.rolling_mean(df['volume'], window=20)
    df['v-ma30'] = pd.rolling_mean(df['volume'], window=30)
    c20 = df.columns
    if 'amount' in c20:
        del df['amount']
    if 'Adj Close' in c20:
        del df['Adj Close']
    df = df.round(decimals=2)
    clst = ["open", "high", "low", "close", "volume", "xdiff",
            "z-open", "z-high", "z-low", "z-close", "z-xdiff",
            "ma5", "ma10", "ma20", "ma30",
            "v-ma5", "v-ma10", "v-ma20", "v-ma30"]
    d30 = pd.DataFrame(df, columns=clst)
    return d30

Edit user data format.

Args:
    df (pd.DataFrame): stock data
625941b715baa723493c3db9
def _microarch_target_args(self):
    targetlist_name = join_path(self.stage.source_path, "TargetList.txt")
    if os.path.exists(targetlist_name):
        with open(targetlist_name) as f:
            available_targets = self._read_targets(f)
    else:
        available_targets = []
    microarch = self.spec.target
    args = []
    openblas_arch = set(['alpha', 'arm', 'ia64', 'mips', 'mips64',
                         'power', 'sparc', 'zarch'])
    openblas_arch_map = {
        'amd64': 'x86_64',
        'powerpc64': 'power',
        'i386': 'x86',
        'aarch64': 'arm64',
    }
    openblas_arch.update(openblas_arch_map.keys())
    openblas_arch.update(openblas_arch_map.values())
    skylake = set(["skylake", "skylake_avx512"])
    available_targets = set(available_targets) | skylake | openblas_arch
    if microarch.name not in available_targets:
        for microarch in microarch.ancestors:
            if microarch.name in available_targets:
                break
    if self.version >= Version("0.3"):
        arch_name = microarch.family.name
        if arch_name in openblas_arch:
            arch_name = openblas_arch_map.get(arch_name, arch_name)
            args.append('ARCH=' + arch_name)
    if microarch.vendor == 'generic':
        args.append('DYNAMIC_ARCH=1')
        if self.spec.version >= Version('0.3.12'):
            args.extend(['DYNAMIC_OLDER=1', 'TARGET=GENERIC'])
    elif microarch.name in skylake:
        args.append('TARGET=SKYLAKEX')
        if microarch.name == "skylake":
            args.append('NO_AVX512=1')
    else:
        args.append('TARGET=' + microarch.name.upper())
    return args

Given a spack microarchitecture and a list of targets found in OpenBLAS'
TargetList.txt, determine the best command-line arguments.
625941b8eab8aa0e5d26d9a6
def run(self):
    self._main_loop(self._proc_pipe)
Start the player.
625941b8b57a9660fec336c7
def _ebServiceUnknown(self, failure):
    failure.trap(error.ServiceNameUnknownError)
    self.servers = [(0, 0, self.domain, self._defaultPort)]
    self.orderedServers = []
    self.connect()

Connect to the default port when the service name is unknown.

If no SRV records were found, the service name will be passed as the port.
If resolving the name fails with L{error.ServiceNameUnknownError}, a final
attempt is done using the default port.
625941b8f548e778e58cd3c3
def get_api_url():
    return 'https://api.basespace.illumina.com/v1pre3'
Get base BaseSpace API URL.
625941b8097d151d1a222ca4
def f1(self):
    return len(self.valid_actions)

Mobility
--------
The number of choices the player can make.
625941b8cc0a2c11143dcce0
def ask_timelimit(prompt): <NEW_LINE> <INDENT> def ask_date(prompt_date): <NEW_LINE> <INDENT> date = input("{}\n".format(prompt_date)) <NEW_LINE> if not date: <NEW_LINE> <INDENT> return '' <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> time.strptime(date, '%Y-%m-%d') <NEW_LINE> return date <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> print("Wrong value entered. Please choose again.\n") <NEW_LINE> return ask_date(prompt_date) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> limit = ask_yes_or_no(prompt) <NEW_LINE> if limit: <NEW_LINE> <INDENT> low = ask_date("Specify lower boundary of time period (format: yyyy-mm-dd) or press ENTER for none.\n") <NEW_LINE> upp = ask_date("Specify upper boundary of time period (format: yyyy-mm-dd) or press ENTER for none.\n") <NEW_LINE> if not low: <NEW_LINE> <INDENT> low = '1900-01-01' <NEW_LINE> <DEDENT> if not upp: <NEW_LINE> <INDENT> upp = 'NOW()' <NEW_LINE> <DEDENT> return "creation_time BETWEEN '{}' AND '{}' ".format(low, upp) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return "creation_time BETWEEN '1900-01-01' AND NOW() "
Ask the user if they want to limit their search results by time. If yes, limit the search by dates entered by the user. The question will be asked recursively until the user enters 'y' or 'n'. If 'y', the nested function ask_date() will be called to specify the lower and upper time boundaries. If 'n', the boundaries default to '1900-01-01' (lower) and NOW() (upper). Parameters ---------- prompt (str): Text of the question printed to the user. Returns ------- str: Result is formatted to serve as a condition for an SQL WHERE clause. Ex. "creation_time BETWEEN '1900-01-01' AND NOW() " Ex. "creation_time BETWEEN '2019-01-01' AND '2019-04-30' "
625941b80a50d4780f666cd7
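The returned string is meant to drop straight into a WHERE clause; a sketch of a caller (the table name and the cursor are hypothetical):

condition = ask_timelimit("Limit results by time? (y/n)")
# e.g. condition == "creation_time BETWEEN '2019-01-01' AND '2019-04-30' "
query = "SELECT * FROM photos WHERE " + condition + "ORDER BY creation_time"
# cursor.execute(query)  # run against whatever DB connection the caller holds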
def set_boundary(self, boundary): <NEW_LINE> <INDENT> self.boundary = quote_plus(boundary)
Sets the boundary in the context of a Form; this is usually called by the `Form` class because all Form objects should have the same boundary key. :param boundary: A unique string that is the boundary :type boundary: str
625941b8d99f1b3c44c673df
def check_existing_credentials(account_name): <NEW_LINE> <INDENT> return Credentials.credentials_exist(account_name)
Function that checks if credentials exist with that account name and returns a Boolean
625941b8ac7a0e7691ed3f21
def BOP(equity, start=None, end=None): <NEW_LINE> <INDENT> opn = np.array(equity.hp.loc[start:end, 'open'], dtype='f8') <NEW_LINE> high = np.array(equity.hp.loc[start:end, 'high'], dtype='f8') <NEW_LINE> low = np.array(equity.hp.loc[start:end, 'low'], dtype='f8') <NEW_LINE> close = np.array(equity.hp.loc[start:end, 'close'], dtype='f8') <NEW_LINE> real = talib.BOP(opn, high, low, close) <NEW_LINE> return real
Balance Of Power :return: array of Balance Of Power values over [start:end]
625941b8283ffb24f3c55754
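Balance Of Power is (close - open) / (high - low); a quick check against talib on synthetic arrays (assumes the TA-Lib Python bindings are installed):

import numpy as np
import talib

opn = np.array([10.0, 11.0, 12.0])
high = np.array([12.0, 13.0, 14.0])
low = np.array([9.0, 10.0, 11.0])
close = np.array([11.0, 12.0, 13.0])
print(talib.BOP(opn, high, low, close))  # [0.3333 0.3333 0.3333]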
def __getitem__(self, colid): <NEW_LINE> <INDENT> return getattr(self, colid)
Return a specific cell. - colid: cell identifier
625941b8bde94217f3682c44
def request_access_token(self, code, redirect_uri=None): <NEW_LINE> <INDENT> redirect = redirect_uri or self._redirect_uri <NEW_LINE> resp_text = _http('POST', 'https://api.weibo.com/oauth2/access_token', client_id=self._client_id, client_secret=self._client_secret, redirect_uri=redirect, code=code, grant_type='authorization_code') <NEW_LINE> r = _parse_json(resp_text) <NEW_LINE> current = int(time.time()) <NEW_LINE> expires = r.expires_in + current <NEW_LINE> remind_in = r.get('remind_in', None) <NEW_LINE> if remind_in: <NEW_LINE> <INDENT> rtime = int(remind_in) + current <NEW_LINE> if rtime < expires: <NEW_LINE> <INDENT> expires = rtime <NEW_LINE> <DEDENT> <DEDENT> return JsonDict(access_token=r.access_token, expires=expires, uid=r.get('uid', None))
Return the access token as a JsonDict: {"access_token":"your-access-token","expires":12345678,"uid":1234}; expires is represented as standard Unix epoch time
625941b8e5267d203edcdae9
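A usage sketch in the style of the sina weibo SDK this method appears to come from; the client class name, the credential values, and the set_access_token companion call are all assumptions:

client = APIClient(client_id='your-app-id',            # hypothetical client class
                   client_secret='your-app-secret',
                   redirect_uri='http://example.com/callback')
r = client.request_access_token(code='code-from-oauth2-redirect')
# r.access_token, r.expires (Unix epoch seconds), r.uid
client.set_access_token(r.access_token, r.expires)     # hypothetical companion method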
def to_up(self, _=None): <NEW_LINE> <INDENT> if self.isInVehicle: <NEW_LINE> <INDENT> self.vehicle.coord[2] += 10 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.player.coord[2] += 3
Ascend (without velocity)
625941b86e29344779a6245d
def p_tupla_item(p): <NEW_LINE> <INDENT> pass
item : ID ITEM
625941b88e7ae83300e4ae13
def __init__(self, app, options): <NEW_LINE> <INDENT> self.__app = app <NEW_LINE> self.__options = options
:type app: metasdk.MetaApp
625941b87b25080760e392a3
def authenticate(self, request, username=None, password=None, **kwargs): <NEW_LINE> <INDENT> query_set = User.objects.filter(Q(username=username) | Q(telephone=username)) <NEW_LINE> try: <NEW_LINE> <INDENT> if query_set.exists(): <NEW_LINE> <INDENT> user = query_set.get() <NEW_LINE> if user.check_password(password): <NEW_LINE> <INDENT> return user <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> except: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> return None
Check whether the username (phone number) and password are correct
625941b87cff6e4e811177ce
def __compare_instance__(self, result, origin): <NEW_LINE> <INDENT> self.assertEqual(result["id"], origin.id) <NEW_LINE> self.assertEqual(result["name"], origin.name) <NEW_LINE> self.assertEqual(result["status"], origin.status) <NEW_LINE> self.assertEqual(result["key_name"], origin.key_name) <NEW_LINE> self.assertEqual(result["human_id"], origin.human_id) <NEW_LINE> self.assertEqual(result["networks"], origin.networks)
Compare the instance in the result with the one set in the mock
625941b8fb3f5b602dac34d7
@CoroutineInputTransformer.wrap <NEW_LINE> def classic_prompt(): <NEW_LINE> <INDENT> prompt_re = re.compile(r'^(>>> ?|^\.\.\. ?)') <NEW_LINE> return _strip_prompts(prompt_re)
Strip the >>>/... prompts of the Python interactive shell.
625941b8293b9510aa2c30e1
def get_available_pageviews_geotargeted(sr, location, start, end, datestr=False, ignore=None): <NEW_LINE> <INDENT> predicted_by_location = { None: get_predicted_pageviews(sr, start, end), location: get_predicted_geotargeted(sr, location, start, end), } <NEW_LINE> if location.metro: <NEW_LINE> <INDENT> country_location = Location(country=location.country) <NEW_LINE> country_prediction = get_predicted_geotargeted(sr, country_location, start, end) <NEW_LINE> predicted_by_location[country_location] = country_prediction <NEW_LINE> <DEDENT> datekey = lambda dt: dt.strftime('%m/%d/%Y') if datestr else dt <NEW_LINE> ret = {} <NEW_LINE> campaigns_by_date = get_campaigns_by_date(sr, start, end, ignore) <NEW_LINE> for date, campaigns in campaigns_by_date.iteritems(): <NEW_LINE> <INDENT> sold_by_location = dict.fromkeys(predicted_by_location.keys(), 0) <NEW_LINE> for camp in campaigns: <NEW_LINE> <INDENT> daily_impressions = camp.impressions / camp.ndays <NEW_LINE> for location in predicted_by_location: <NEW_LINE> <INDENT> if locations_overlap(location, camp.location): <NEW_LINE> <INDENT> sold_by_location[location] += daily_impressions <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> available_by_location = dict.fromkeys(predicted_by_location.keys(), 0) <NEW_LINE> for location, predictions_by_date in predicted_by_location.iteritems(): <NEW_LINE> <INDENT> predicted = predictions_by_date[date] <NEW_LINE> sold = sold_by_location[location] <NEW_LINE> available_by_location[location] = predicted - sold <NEW_LINE> <DEDENT> ret[datekey(date)] = max(0, min(available_by_location.values())) <NEW_LINE> <DEDENT> return ret
Return the available pageviews by date for the subreddit and location. Available pageviews depends on all equal and higher level targets: A target is: subreddit > country > metro e.g. if a campaign is targeting /r/funny in USA/Boston we need to check that there's enough inventory in: * /r/funny (all campaigns targeting /r/funny regardless of geotargeting) * /r/funny + USA (all campaigns targeting /r/funny and USA with or without metro level targeting) * /r/funny + USA + Boston (all campaigns targeting /r/funny and USA and Boston) The available inventory is the smallest of these values.
625941b897e22403b379cde1
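The heart of the computation is a per-location subtraction followed by a min across all overlapping targets; a toy illustration with made-up numbers:

predicted = {"subreddit": 10000, "country": 4000, "metro": 1500}
sold = {"subreddit": 2000, "country": 3500, "metro": 200}

available = {loc: predicted[loc] - sold[loc] for loc in predicted}
# {"subreddit": 8000, "country": 500, "metro": 1300}
print(max(0, min(available.values())))  # 500 -- the country level is the bottleneck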
def latestTweet(self): <NEW_LINE> <INDENT> home_timeline = self.__api.home_timeline() <NEW_LINE> return home_timeline[0]
Returns the latest tweet from the logged-in user's timeline
625941b81b99ca400220a8f9
def fitFunction(self, height, cx, cy, rx, ry, offset): <NEW_LINE> <INDENT> def isG0(a): <NEW_LINE> <INDENT> return a * (a > 0) <NEW_LINE> <DEDENT> return lambda x, y: (isG0(height * (1 - ((x - cx) / rx)**2 - ((y-cy) / ry) ** 2)) + offset)
Returns a 2-D peak function with the given parameters (a clipped elliptic paraboloid rather than a true Gaussian)
625941b8be383301e01b52d5
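At the centre (cx, cy) the returned callable evaluates to height + offset and is clipped to offset on and outside the ellipse; a quick check (`obj` stands in for an instance of the owning class, which is not shown here):

f = obj.fitFunction(height=5.0, cx=0.0, cy=0.0, rx=2.0, ry=3.0, offset=1.0)
print(f(0.0, 0.0))  # 6.0  (height + offset at the peak)
print(f(2.0, 0.0))  # 1.0  (clipped to offset on the ellipse boundary)
print(f(5.0, 5.0))  # 1.0  (clipped to offset outside the ellipse)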
def add_volume_bricks(name, bricks): <NEW_LINE> <INDENT> ret = {'name': name, 'changes': {}, 'comment': '', 'result': False} <NEW_LINE> volinfo = __salt__['glusterfs.info']() <NEW_LINE> if name not in volinfo: <NEW_LINE> <INDENT> ret['comment'] = 'Volume {0} does not exist'.format(name) <NEW_LINE> return ret <NEW_LINE> <DEDENT> if int(volinfo[name]['status']) != 1: <NEW_LINE> <INDENT> ret['comment'] = 'Volume {0} is not started'.format(name) <NEW_LINE> return ret <NEW_LINE> <DEDENT> current_bricks = [brick['path'] for brick in volinfo[name]['bricks'].values()] <NEW_LINE> if not set(bricks) - set(current_bricks): <NEW_LINE> <INDENT> ret['result'] = True <NEW_LINE> ret['comment'] = 'Bricks already added in volume {0}'.format(name) <NEW_LINE> return ret <NEW_LINE> <DEDENT> bricks_added = __salt__['glusterfs.add_volume_bricks'](name, bricks) <NEW_LINE> if bricks_added: <NEW_LINE> <INDENT> ret['result'] = True <NEW_LINE> ret['comment'] = 'Bricks successfully added to volume {0}'.format(name) <NEW_LINE> new_bricks = [brick['path'] for brick in __salt__['glusterfs.info']()[name]['bricks'].values()] <NEW_LINE> ret['changes'] = {'new': new_bricks, 'old': current_bricks} <NEW_LINE> return ret <NEW_LINE> <DEDENT> ret['comment'] = 'Adding bricks to volume {0} failed'.format(name) <NEW_LINE> return ret
Add brick(s) to an existing volume name Volume name bricks List of bricks to add to the volume .. code-block:: yaml myvolume: glusterfs.add_volume_bricks: - bricks: - host1:/srv/gluster/drive1 - host2:/srv/gluster/drive2 Replicated Volume: glusterfs.add_volume_bricks: - name: volume2 - bricks: - host1:/srv/gluster/drive2 - host2:/srv/gluster/drive3
625941b8c4546d3d9de72878
def __domain__(self): <NEW_LINE> <INDENT> d = Set() <NEW_LINE> for k in self: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> if len(k) == 2: <NEW_LINE> <INDENT> k = k[1] <NEW_LINE> <DEDENT> <DEDENT> except TypeError: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> d += Set(k) <NEW_LINE> <DEDENT> return d
:returns: a set of all keys if the set is used as a map
625941b87d847024c06be108
def safe_unicode(s): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> result = unicode(s, "utf-8").decode("utf-8") <NEW_LINE> <DEDENT> except UnicodeDecodeError: <NEW_LINE> <INDENT> result = s.decode("latin-1") <NEW_LINE> <DEDENT> except TypeError: <NEW_LINE> <INDENT> result = unicode(s) <NEW_LINE> <DEDENT> return result
Return the unicode representation of s
625941b8a79ad161976cbf8e
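This helper targets Python 2 (it relies on the unicode builtin); typical calls covering all three branches:

print(safe_unicode("hello"))      # plain bytes decode as utf-8 -> u"hello"
print(safe_unicode("h\xe9llo"))   # invalid utf-8 falls back to latin-1 -> u"h\xe9llo"
print(safe_unicode(42))           # non-string raises TypeError, falls through to unicode(42) -> u"42"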
def all_files(directory="..\\raw_data\\"): <NEW_LINE> <INDENT> files = list_files(directory) <NEW_LINE> result = [] <NEW_LINE> for year in files.keys(): <NEW_LINE> <INDENT> result += files[year] <NEW_LINE> <DEDENT> return result
Return flat list of all csv files in the given directory. Args: directory [string] full path to directory with csv files. Default project layout is used if it is not provided Returns: Flat list of csv files as absolute names.
625941b876e4537e8c3514c0
def install(pkg, version=None, py2=False): <NEW_LINE> <INDENT> if version: <NEW_LINE> <INDENT> pkg = pkg + "==" + version <NEW_LINE> <DEDENT> s = shell("pip{0} install {1}".format("2" if py2 else "", pkg)) <NEW_LINE> if s["code"] != 0: <NEW_LINE> <INDENT> errmsg = "PyPI install of {0} failed.".format(pkg) <NEW_LINE> logmsg = "PyPI install failure details:\n{0}" <NEW_LINE> logger.error("Python", logmsg.format(s["stderr"].decode())) <NEW_LINE> raise errors.OperationFailedError(errmsg)
Install a Python package from PyPI. :param str pkg: package to install :param str version: If present, install this specific version :param bool py2: If True, install for Python 2.x instead
625941b88a43f66fc4b53eb2
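Typical calls, assuming the shell and errors helpers this function uses are importable alongside it:

install("requests")                  # latest version, default pip
install("flask", version="1.1.4")    # pin an exact version
install("supervisor", py2=True)      # use pip2 for a Python 2 package
# a non-zero pip exit logs stderr and raises errors.OperationFailedError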
def deviceinfo(): <NEW_LINE> <INDENT> return execute("termux-telephony-displayinfo")
Get information about the telephony device.
625941b8d18da76e2353231a
def _align_header(header, alignment, width, visible_width, is_multiline=False): <NEW_LINE> <INDENT> if is_multiline: <NEW_LINE> <INDENT> header_lines = re.split(_multiline_codes, header) <NEW_LINE> padded_lines = [_align_header(h, alignment, width, visible_width) for h in header_lines] <NEW_LINE> return "\n".join(padded_lines) <NEW_LINE> <DEDENT> ninvisible = max(0, len(header) - visible_width) <NEW_LINE> width += ninvisible <NEW_LINE> if alignment == "left": <NEW_LINE> <INDENT> return _padright(width, header) <NEW_LINE> <DEDENT> elif alignment == "center": <NEW_LINE> <INDENT> return _padboth(width, header) <NEW_LINE> <DEDENT> elif not alignment: <NEW_LINE> <INDENT> return "{0}".format(header) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return _padleft(width, header)
Pad string header to width chars given known visible_width of the header.
625941b8f9cc0f698b14044e
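A sketch of the padding behaviour, with minimal stand-ins for tabulate's private _padleft/_padright/_padboth helpers:

def _padleft(width, s):
    return s.rjust(width)

def _padright(width, s):
    return s.ljust(width)

def _padboth(width, s):
    return s.center(width)

# With no invisible (ANSI) characters, visible_width == len(header):
print(repr(_padright(10, "name")))  # 'name      '  (alignment == "left")
print(repr(_padboth(10, "name")))   # '   name   '  (alignment == "center")
print(repr(_padleft(10, "name")))   # '      name'  (any other alignment)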
def get_wsgi_application(): <NEW_LINE> <INDENT> return WSGIHandler()
The public interface to Django's WSGI support. Should return a WSGI callable. Allows us to avoid making django.core.handlers.WSGIHandler public API, in case the internal WSGI implementation changes or moves in the future.
625941b860cbc95b062c6391
def time_series(db_filepath, tablename, out_dirpath, num_workers): <NEW_LINE> <INDENT> os.makedirs(out_dirpath, exist_ok=True) <NEW_LINE> config.db.connect(db_filepath) <NEW_LINE> logging.info("Fetching information about distinct pcap files...") <NEW_LINE> pcap_tuples = config.db.fetch_values( tablename, ["pcap_directory", "pcap_filename"], None, True, ["pcap_directory", "pcap_filename"], ) <NEW_LINE> if num_workers is None: <NEW_LINE> <INDENT> if hasattr(os, "sched_getaffinity"): <NEW_LINE> <INDENT> num_workers = len(os.sched_getaffinity(0)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> num_workers = mp.cpu_count() <NEW_LINE> <DEDENT> <DEDENT> if num_workers < 1: <NEW_LINE> <INDENT> num_workers = 1 <NEW_LINE> <DEDENT> logging.info( "Generating time series data from captured packets " + "that were in {} pcap files using {} workers...".format( len(pcap_tuples), num_workers, ), ) <NEW_LINE> task_index = mp.Value("L", 0, lock=False) <NEW_LINE> task_lock = mp.Lock() <NEW_LINE> processes = [] <NEW_LINE> for _ in range(num_workers): <NEW_LINE> <INDENT> p = mp.Process( target=worker, args=( db_filepath, tablename, pcap_tuples, out_dirpath, task_index, task_lock, ), ) <NEW_LINE> p.start() <NEW_LINE> processes.append(p) <NEW_LINE> <DEDENT> for p in processes: <NEW_LINE> <INDENT> p.join() <NEW_LINE> <DEDENT> logging.info("All {} workers completed their tasks".format(num_workers))
Generate time series data from captured packets.
625941b85fdd1c0f98dc007a
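A usage sketch; the database path, table name, and output directory are placeholders, and the module's config.db setup is assumed to be importable:

import logging

logging.basicConfig(level=logging.INFO)
time_series(
    db_filepath="captures.sqlite",
    tablename="packets",
    out_dirpath="timeseries_out",
    num_workers=None,  # None -> one worker per available CPU core
)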