Columns: code (string, 4 to 4.48k chars), docstring (string, 1 to 6.45k chars), _id (string, 24 chars)
def get_notebook_rich_output(func, *args): <NEW_LINE> <INDENT> cell_outputs = call_func_in_notebook(func, *args) <NEW_LINE> assert len(cell_outputs) == 1 <NEW_LINE> output = cell_outputs[0] <NEW_LINE> assert output.get("output_type") == "display_data" <NEW_LINE> return output.get("data", {})
Call a function in a Jupyter notebook context and return the output data. The function is run using `call_func_in_notebook`. This validates that a single rich output was generated, and returns the output data as a dict mapping mime-type to value. Parameters ---------- func : function The function to run. It must be defined at the module level, and will be imported by name into the notebook. args : object Objects passed as args to the function. Returns ------- dict The rich output as a mapping of mime-type to value.
625941b76aa9bd52df036bdb
@task(periodic=30) <NEW_LINE> def guardian(): <NEW_LINE> <INDENT> mutex = get_mutex('metadash_daemon_spawner') <NEW_LINE> if mutex is not None: <NEW_LINE> <INDENT> acquired = False <NEW_LINE> try: <NEW_LINE> <INDENT> acquired = mutex.acquire(blocking=True, blocking_timeout=0) <NEW_LINE> if acquired: <NEW_LINE> <INDENT> time.sleep(30) <NEW_LINE> running_tasks = dict(itertools.groupby(get_running_task_status(), lambda d: d['name'])) <NEW_LINE> logger.debug('Expected running tasks: %s', Daemons.keys()) <NEW_LINE> logger.debug('Actual running tasks: %s', running_tasks.keys()) <NEW_LINE> for daemon in set(Daemons.keys()) - set(running_tasks.keys()): <NEW_LINE> <INDENT> logger.info('Spawning {}'.format(daemon)) <NEW_LINE> Daemons[daemon].run() <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> logger.info("Previous guardian still running, exiting") <NEW_LINE> <DEDENT> <DEDENT> finally: <NEW_LINE> <INDENT> if acquired: <NEW_LINE> <INDENT> mutex.release() <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> logger.warning("No mutex-supported cache backend detected, skipping background task execution")
Guardian task
625941b7ff9c53063f47c036
def columnCount( self, parent ): <NEW_LINE> <INDENT> return 1
Returns the number of columns; here it always returns 1. @param parent : parent QObject @type parent : L{QtCore.QObject}
625941b70383005118ecf41d
def test_transform_int(self): <NEW_LINE> <INDENT> t = configtypes.Float() <NEW_LINE> self.assertEqual(t.transform('1337'), 1337.00)
Test transform with an int.
625941b73cc13d1c6d3c71bd
def float_unpack80(QQ, size): <NEW_LINE> <INDENT> if size == 10 or size == 12 or size == 16: <NEW_LINE> <INDENT> MIN_EXP = -16381 <NEW_LINE> MAX_EXP = 16384 <NEW_LINE> MANT_DIG = 64 <NEW_LINE> TOP_BITS = 80 - 64 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError("invalid size value") <NEW_LINE> <DEDENT> if len(QQ) != 2: <NEW_LINE> <INDENT> raise ValueError("QQ must be two 64 bit uints") <NEW_LINE> <DEDENT> if not objectmodel.we_are_translated(): <NEW_LINE> <INDENT> if QQ[1] >> TOP_BITS: <NEW_LINE> <INDENT> raise ValueError("input '%r' out of range '%r'" % (QQ, QQ[1]>>TOP_BITS)) <NEW_LINE> <DEDENT> <DEDENT> one = r_ulonglong(1) <NEW_LINE> sign = rarithmetic.intmask(QQ[1] >> TOP_BITS - 1) <NEW_LINE> exp = rarithmetic.intmask((QQ[1] & ((one << TOP_BITS - 1) - 1))) <NEW_LINE> mant = QQ[0] <NEW_LINE> if exp == MAX_EXP - MIN_EXP + 2: <NEW_LINE> <INDENT> if mant == 0: <NEW_LINE> <INDENT> result = rfloat.INFINITY <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> exp = r_ulonglong(0x7ff) << 52 <NEW_LINE> mant = r_ulonglong(mant) >> size + 1 <NEW_LINE> if mant == 0: <NEW_LINE> <INDENT> result = rfloat.NAN <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> uint = exp | r_ulonglong(mant) | r_ulonglong(sign) <NEW_LINE> result = longlong2float(cast(LONGLONG, uint)) <NEW_LINE> <DEDENT> return result <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> result = math.ldexp(mant, exp + MIN_EXP - MANT_DIG - 1) <NEW_LINE> <DEDENT> return -result if sign else result
Unpack a (mant, exp) tuple of r_ulonglong in 80-bit extended format into a python float (a double)
625941b7e1aae11d1e749aec
def load_doctests(loader, tests, ignore, module, additional_skip_names=None, patch_path=True): <NEW_LINE> <INDENT> _patcher = Patcher(additional_skip_names=additional_skip_names, patch_path=patch_path) <NEW_LINE> globs = _patcher.replace_globs(vars(module)) <NEW_LINE> tests.addTests(doctest.DocTestSuite(module, globs=globs, setUp=_patcher.setUp, tearDown=_patcher.tearDown)) <NEW_LINE> return tests
Load the doctest tests for the specified module into unittest. Args: loader, tests, ignore : arguments passed in from `load_tests()` module: module under test additional_skip_names: see :py:class:`TestCase` for an explanation patch_path: see :py:class:`TestCase` for an explanation File `example_test.py` in the pyfakefs release provides a usage example.
625941b79f2886367277a6ca
def test_distributor_autocompletion(self): <NEW_LINE> <INDENT> response = self.client.post('/search.json/', {'field': 'distributor', 'search' : 'NA'}) <NEW_LINE> self.assertContains(response, 'NA', count=2, status_code=200)
Tests autocompletion for distributor.
625941b766673b3332b91ed0
def sflux(imvec, priorvec, flux, norm_reg=NORM_REGULARIZER): <NEW_LINE> <INDENT> if norm_reg: <NEW_LINE> <INDENT> norm = flux**2 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> norm = 1 <NEW_LINE> <DEDENT> out = -(np.sum(imvec) - flux)**2 <NEW_LINE> return out/norm
Total flux constraint
625941b74f6381625f11487f
def set_supported_types(self): <NEW_LINE> <INDENT> ret = [ ] <NEW_LINE> output = "" <NEW_LINE> try: <NEW_LINE> <INDENT> output = self.__run(["--help"]) <NEW_LINE> <DEDENT> except ValueError as ex: <NEW_LINE> <INDENT> log.debug1("ipset error: %s" % ex) <NEW_LINE> <DEDENT> lines = output.splitlines() <NEW_LINE> in_types = False <NEW_LINE> for line in lines: <NEW_LINE> <INDENT> if in_types: <NEW_LINE> <INDENT> splits = line.strip().split(None, 2) <NEW_LINE> if splits[0] not in ret and splits[0] in IPSET_TYPES: <NEW_LINE> <INDENT> ret.append(splits[0]) <NEW_LINE> <DEDENT> <DEDENT> if line.startswith("Supported set types:"): <NEW_LINE> <INDENT> in_types = True <NEW_LINE> <DEDENT> <DEDENT> return ret
Return types that are supported by the ipset command and kernel
625941b78e05c05ec3eea1aa
def get_section_dict(self, section, subsection=None): <NEW_LINE> <INDENT> dict_obj = dict() <NEW_LINE> for key in self.keys(section, subsection): <NEW_LINE> <INDENT> dict_obj[key] = self.get(section, key, subsection=subsection) <NEW_LINE> <DEDENT> return dict_obj
Return a dict representation of a section. :param section: The section of the configuration. :param subsection: The subsection of the configuration. :returns: Dictionary representation of the config section. :rtype: dict
625941b7b545ff76a8913c58
def get_fallback_view(service): <NEW_LINE> <INDENT> def _fallback_view(request): <NEW_LINE> <INDENT> if request.method not in service.defined_methods: <NEW_LINE> <INDENT> response = HTTPMethodNotAllowed() <NEW_LINE> response.allow = service.defined_methods <NEW_LINE> return response <NEW_LINE> <DEDENT> acceptable = [] <NEW_LINE> for method, _, args in service.definitions: <NEW_LINE> <INDENT> if method != request.method: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> if 'accept' in args: <NEW_LINE> <INDENT> acceptable.extend( service.get_acceptable(method, filter_callables=True)) <NEW_LINE> if 'acceptable' in request.info: <NEW_LINE> <INDENT> for content_type in request.info['acceptable']: <NEW_LINE> <INDENT> if content_type not in acceptable: <NEW_LINE> <INDENT> acceptable.append(content_type) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> if not request.accept.best_match(acceptable): <NEW_LINE> <INDENT> response = HTTPNotAcceptable() <NEW_LINE> response.content_type = "application/json" <NEW_LINE> response.body = json.dumps(acceptable) <NEW_LINE> return response <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> raise PredicateMismatch(service.name) <NEW_LINE> <DEDENT> return _fallback_view
Fallback view for a given service, called when nothing else matches. This method provides the view logic to be executed when the request does not match any explicitly-defined view. Its main responsibility is to produce an accurate error response, such as HTTPMethodNotAllowed or HTTPNotAcceptable.
625941b74a966d76dd550e45
def missingNumber(self, nums): <NEW_LINE> <INDENT> return len(nums) * (len(nums) + 1) // 2 - sum(nums)
:type nums: List[int] :rtype: int
625941b785dfad0860c3ac92
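The row above relies on the Gauss sum identity: for nums drawn from 0..n with one value missing, the expected total is n(n+1)/2, so subtracting the actual sum leaves the missing value. A minimal standalone check (inputs are illustrative):

# Standalone sketch of the Gauss-sum trick used in missingNumber above.
def missing_number(nums):
    # expected sum of 0..n minus actual sum = the missing value
    return len(nums) * (len(nums) + 1) // 2 - sum(nums)

assert missing_number([3, 0, 1]) == 2
assert missing_number([9, 6, 4, 2, 3, 5, 7, 0, 1]) == 8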
def func(x, y, z): <NEW_LINE> <INDENT> time.sleep(x/10) <NEW_LINE> return np.sum([x, y, z])
Example function with three arguments; sleeps for x/10 seconds, then returns the sum of x, y and z.
625941b72ae34c7f2600cf6b
def setUp(self): <NEW_LINE> <INDENT> if IS_PLONE_5: <NEW_LINE> <INDENT> self.portal = self.layer['portal'] <NEW_LINE> setRoles(self.portal, TEST_USER_ID, ['Manager']) <NEW_LINE> _createMemberarea(self.portal, TEST_USER_ID) <NEW_LINE> self.folder = self.portal.portal_membership.getHomeFolder(TEST_USER_ID) <NEW_LINE> transaction.commit() <NEW_LINE> self.afterSetUp() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> super(TestMailAction, self).setUp()
The setup steps below are only needed in Plone 5, because in Plone 4 they are taken care of automatically by PloneTestCase.
625941b7c432627299f04a7d
def _refine_candidates(qset, token, year, sentence_start): <NEW_LINE> <INDENT> antedating_margin = _set_antedating_margin(year) <NEW_LINE> if not qset.exists(): <NEW_LINE> <INDENT> response = [] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> qset = qset.filter(lemma__firstyear__lt=antedating_margin) <NEW_LINE> response = list(qset) <NEW_LINE> if qset.count() > 1: <NEW_LINE> <INDENT> if sentence_start: <NEW_LINE> <INDENT> qset2 = qset.filter(wordform__iexact=token) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> qset2 = qset.filter(wordform=token) <NEW_LINE> <DEDENT> if qset2: <NEW_LINE> <INDENT> response = list(qset2) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return response
Ditch any candidates from the query set that don't seem viable; return the rest as a list.
625941b71d351010ab855957
def branch_if_overflow_set(cpu: 'Cpu', value: int) -> None: <NEW_LINE> <INDENT> _branch(cpu, cpu.status.overflow, value)
BVS instruction
625941b75e10d32532c5ed68
def resample_img_to_ref(img, config, ref_img=None): <NEW_LINE> <INDENT> if config['Interpolator'] == "BSpline": <NEW_LINE> <INDENT> interpolator = sitk.sitkBSpline <NEW_LINE> <DEDENT> elif config['Interpolator'] == "Linear": <NEW_LINE> <INDENT> interpolator = sitk.sitkLinear <NEW_LINE> <DEDENT> elif config['Interpolator'] == "NearestNeighbor": <NEW_LINE> <INDENT> interpolator = sitk.sitkNearestNeighbor <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> interpolator = sitk.sitkLinear <NEW_LINE> <DEDENT> if not ref_img: <NEW_LINE> <INDENT> ref_img = img <NEW_LINE> <DEDENT> target_size = img.GetSize() <NEW_LINE> target_spacing = img.GetSpacing() <NEW_LINE> if config['SetTargetSpacing'] is None: <NEW_LINE> <INDENT> target_spacing = ref_img.GetSpacing() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> target_spacing = config['SetTargetSpacing'] <NEW_LINE> for i, space in enumerate(target_spacing, 0): <NEW_LINE> <INDENT> if space <= 0: <NEW_LINE> <INDENT> target_spacing[i] = ref_img.GetSpacing()[i] <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> if config['SetTargetSize'] is None: <NEW_LINE> <INDENT> target_size = ref_img.GetSize() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> target_size = config['SetTargetSize'] <NEW_LINE> target_size = target_size.tolist() <NEW_LINE> for i, size in enumerate(target_size, 0): <NEW_LINE> <INDENT> if size == 0: <NEW_LINE> <INDENT> target_size[i] = ref_img.GetSize()[i] <NEW_LINE> <DEDENT> elif size < 0: <NEW_LINE> <INDENT> img_size = ref_img.GetSize()[i] <NEW_LINE> img_spacing = ref_img.GetSpacing()[i] <NEW_LINE> new_spacing = target_spacing[i] <NEW_LINE> target_size[i] = int(img_size*img_spacing/new_spacing) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> target_origin = img.GetOrigin() <NEW_LINE> if config['SetTargetOrigin'] is None: <NEW_LINE> <INDENT> target_origin = ref_img.GetOrigin() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> target_origin = config['SetTargetOrigin'] <NEW_LINE> <DEDENT> target_direction = img.GetDirection() <NEW_LINE> if config['SetTargetDirection'] is None: <NEW_LINE> <INDENT> target_direction = ref_img.GetDirection() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> target_direction = config['SetTargetDirection'] <NEW_LINE> <DEDENT> default_value = config['DefaultValue'] <NEW_LINE> rs_img = sitk.Resample(img, target_size, sitk.Transform(), interpolator, np.array(target_origin).astype(float), np.array(target_spacing).astype(float), np.array(target_direction).astype(float), float(default_value), img.GetPixelIDValue()) <NEW_LINE> return rs_img
Resamples and aligns an image to a reference image. Overwrites target spacing, size, origin and direction if specified in the configuration file. Args: img: sitk image (sitk object) ref_img: reference image (sitk object) config: dictionary with target and interpolation parameters. Returns: Resampled image (sitk object)
625941b7d10714528d5ffb18
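A hedged usage sketch for resample_img_to_ref: the config keys below are exactly the ones the function reads, but the file name and target values are hypothetical.

# Hypothetical usage of resample_img_to_ref; key names follow the code above.
import SimpleITK as sitk

config = {
    'Interpolator': 'BSpline',            # 'BSpline', 'Linear' or 'NearestNeighbor'
    'SetTargetSpacing': [1.0, 1.0, 1.0],  # entries <= 0 fall back to the reference spacing
    'SetTargetSize': None,                # None keeps the reference size
    'SetTargetOrigin': None,              # None keeps the reference origin
    'SetTargetDirection': None,           # None keeps the reference direction
    'DefaultValue': 0,                    # fill value outside the input image
}

img = sitk.ReadImage('scan.nii.gz')       # hypothetical input path
resampled = resample_img_to_ref(img, config)  # ref_img defaults to img itself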
def __init__(self, image, model, configuration): <NEW_LINE> <INDENT> self.image = image <NEW_LINE> self.model = model <NEW_LINE> self.configuration = configuration
Constructor :param image: image to search :param model: face detection model :param configuration: FaceSearchConfiguration instance
625941b7e5267d203edcdada
def show_colornames(): <NEW_LINE> <INDENT> names = sorted(colornames().keys()) <NEW_LINE> for name in names: <NEW_LINE> <INDENT> print('{:22s}{}'.format(name, colornames()[name]))
Show (print) all available color names and their values.
625941b7e64d504609d7467a
def interfaces(): <NEW_LINE> <INDENT> with salt.utils.winapi.Com(): <NEW_LINE> <INDENT> c = wmi.WMI() <NEW_LINE> ifaces = {} <NEW_LINE> for iface in c.Win32_NetworkAdapterConfiguration(IPEnabled=1): <NEW_LINE> <INDENT> ifaces[iface.Description] = dict() <NEW_LINE> if iface.MACAddress: <NEW_LINE> <INDENT> ifaces[iface.Description]['hwaddr'] = iface.MACAddress <NEW_LINE> <DEDENT> if iface.IPEnabled: <NEW_LINE> <INDENT> ifaces[iface.Description]['up'] = True <NEW_LINE> ifaces[iface.Description]['inet'] = [] <NEW_LINE> for ip in iface.IPAddress: <NEW_LINE> <INDENT> item = {} <NEW_LINE> item['broadcast'] = '' <NEW_LINE> try: <NEW_LINE> <INDENT> item['broadcast'] = iface.DefaultIPGateway[0] <NEW_LINE> <DEDENT> except Exception: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> item['netmask'] = iface.IPSubnet[0] <NEW_LINE> item['label'] = iface.Description <NEW_LINE> item['address'] = ip <NEW_LINE> ifaces[iface.Description]['inet'].append(item) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> ifaces[iface.Description]['up'] = False <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return ifaces
Return details about each network interface
625941b745492302aab5e0f9
def save(self, filename, root="config"): <NEW_LINE> <INDENT> d = os.path.dirname(filename) <NEW_LINE> with tempfile.NamedTemporaryFile(mode="w", delete=False, dir=d) as outfile: <NEW_LINE> <INDENT> self.saveToStream(outfile, root) <NEW_LINE> umask = os.umask(0o077) <NEW_LINE> os.umask(umask) <NEW_LINE> os.chmod(outfile.name, (~umask & 0o666)) <NEW_LINE> shutil.move(outfile.name, filename)
Save a Python script to the named file, which, when loaded, reproduces this config. Parameters ---------- filename : `str` Destination filename of this configuration. root : `str`, optional Name to use for the root config variable. The same value must be used when loading (see `lsst.pex.config.Config.load`). See also -------- lsst.pex.config.Config.saveToStream lsst.pex.config.Config.saveToString lsst.pex.config.Config.load lsst.pex.config.Config.loadFromStream lsst.pex.config.Config.loadFromString
625941b77c178a314d6ef291
def test_string_arg__no_access__raise_false(self): <NEW_LINE> <INDENT> view = _TestView.as_view( permission_required='djemtest.add_olptest', raise_exception=False ) <NEW_LINE> request = self.factory.get('/test/') <NEW_LINE> request.user = self.user <NEW_LINE> with self.assertRaises(PermissionDenied): <NEW_LINE> <INDENT> view(request)
Test the PermissionRequiredMixin with a valid permission as a single string and ``raise_exception`` set to False. Ensure the mixin correctly denies access to the view for a user that has not been granted that permission at the model level, by raising PermissionDenied (which would be translated into a 403 error page). This should happen despite ``raise_exception`` being False, due to the user already being authenticated.
625941b77c178a314d6ef292
def iter_locations(self): <NEW_LINE> <INDENT> for alt_loc in self.locations: <NEW_LINE> <INDENT> yield self.locations[alt_loc]
Iterate over Atom.Location objects for this atom, including primary location. >>> atom = Atom("ATOM 2209 CB TYR A 299 6.167 22.607 20.046 1.00 8.12 C") >>> for c in atom.iter_locations(): ... print c ... [6.167, 22.607, 20.046] A
625941b78c3a8732951581f7
def icecast_json(result, mount): <NEW_LINE> <INDENT> if result.status_code == 200: <NEW_LINE> <INDENT> result = result.json() <NEW_LINE> if "icestats" in result and "source" in result["icestats"]: <NEW_LINE> <INDENT> if isinstance(result["icestats"]["source"], dict): <NEW_LINE> <INDENT> return result["icestats"]["source"] <NEW_LINE> <DEDENT> for m in result["icestats"]["source"]: <NEW_LINE> <INDENT> _, url = m["listenurl"].rsplit("/", 1) <NEW_LINE> if url != mount: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> return m <NEW_LINE> <DEDENT> return {} <NEW_LINE> <DEDENT> return {} <NEW_LINE> <DEDENT> if result.status_code == 400: <NEW_LINE> <INDENT> error = error_regex.match(result.content) <NEW_LINE> if error: <NEW_LINE> <INDENT> logging.warning("Icecast API - mount unavailable ({})".format(error.group('err'))) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> logging.warning("Icecast API - Unknown error: {}".format(result.content)) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> logging.warning("Icecast API - Unknown error: {}".format(result.content))
Parse out json from an icecast endpoint.
625941b7460517430c393fc8
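A hedged usage sketch for icecast_json: the host and mount are illustrative; /status-json.xsl is the conventional Icecast 2.4+ status endpoint.

# Hypothetical usage: fetch Icecast status JSON and extract one mount.
import requests

result = requests.get('http://localhost:8000/status-json.xsl')  # illustrative host
source = icecast_json(result, 'stream.mp3')                     # illustrative mount
if source:
    print(source.get('listenurl'), source.get('listeners'))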
def interpret(self) -> None: <NEW_LINE> <INDENT> self.fstype = "journal on ext3" <NEW_LINE> self.devname = block_device_name(self.bh['b_bdev']) <NEW_LINE> self.offset = int(self.bh['b_blocknr']) * int(self.bh['b_size']) <NEW_LINE> self.length = int(self.bh['b_size'])
Interprets the ext3 buffer_head to populate its attributes
625941b796565a6dacc8f50e
def radio_factory(button_name, toolbar, callback, cb_arg=None, tooltip=None, group=None): <NEW_LINE> <INDENT> button = RadioToolButton(group=group) <NEW_LINE> button.set_named_icon(button_name) <NEW_LINE> if callback is not None: <NEW_LINE> <INDENT> if cb_arg is None: <NEW_LINE> <INDENT> button.connect('clicked', callback) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> button.connect('clicked', callback, cb_arg) <NEW_LINE> <DEDENT> <DEDENT> if hasattr(toolbar, 'insert'): <NEW_LINE> <INDENT> toolbar.insert(button, -1) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> toolbar.props.page.insert(button, -1) <NEW_LINE> <DEDENT> button.show() <NEW_LINE> if tooltip is not None: <NEW_LINE> <INDENT> button.set_tooltip(tooltip) <NEW_LINE> <DEDENT> return button
Add a radio button to a toolbar
625941b7ad47b63b2c509dc3
def fix_profiles(y, z): <NEW_LINE> <INDENT> for i in range(y.shape[0]): <NEW_LINE> <INDENT> if len(y[i][~y[i].mask]) and y[i][~y[i].mask][0] > y[i][~y[i].mask][-1]: <NEW_LINE> <INDENT> y[i][~y[i].mask] = y[i][~y[i].mask][::-1] <NEW_LINE> z[i][~z[i].mask] = z[i][~z[i].mask][::-1]
Inverts the profile in-place if it is an upcast, so the mesh has a correct y-axis
625941b77b180e01f3dc463f
def testSearchTMProjectDto(self): <NEW_LINE> <INDENT> pass
Test SearchTMProjectDto
625941b776d4e153a657e969
@marsloader(query_api_support=True) <NEW_LINE> def audit_query(request, group_by, date_group, aggregate_by, limit, offset, order_by, status, date_range, filters, record=None): <NEW_LINE> <INDENT> query_filters = copy.copy(filters) <NEW_LINE> if record: <NEW_LINE> <INDENT> if filters.has_key('record_id') and filters['record_id'] is not record.id: <NEW_LINE> <INDENT> return HttpResponseBadRequest('Cannot make Audit queries over records not in the request url') <NEW_LINE> <DEDENT> query_filters['record_id'] = record.id <NEW_LINE> <DEDENT> q = FactQuery(Audit, AUDIT_FILTERS, group_by, date_group, aggregate_by, limit, offset, order_by, None, date_range, query_filters, record=None, carenet=None) <NEW_LINE> try: <NEW_LINE> <INDENT> q.execute() <NEW_LINE> if q.query_filters.has_key('record_id') and not filters.has_key('record_id'): <NEW_LINE> <INDENT> del q.query_filters['record_id'] <NEW_LINE> <DEDENT> return q.render(AUDIT_TEMPLATE) <NEW_LINE> <DEDENT> except ValueError as e: <NEW_LINE> <INDENT> return HttpResponseBadRequest(str(e))
Select Audit Objects via the Query API Interface
625941b78e71fb1e9831d5e7
def get_img_url(url): <NEW_LINE> <INDENT> id = url.split("/")[-2] <NEW_LINE> return "http://h5.cyol.com/special/daxuexi/"+id+"/images/end.jpg"
Get the background image shown once viewing is complete. :param url: the URL taken from the get_version_under_compilation array, via .get("vds")[0].get("url") :return: the screenshot URL
625941b7d6c5a10208143e81
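A worked trace of the URL rewrite above: the id is the second-to-last path segment of the input URL.

# url.split("/") -> ['http:', '', 'h5.cyol.com', 'special', 'daxuexi', 'abc123', 'm.html']
# so url.split("/")[-2] -> 'abc123' (the id segment here is hypothetical)
url = "http://h5.cyol.com/special/daxuexi/abc123/m.html"
assert get_img_url(url) == "http://h5.cyol.com/special/daxuexi/abc123/images/end.jpg"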
def media_play(self): <NEW_LINE> <INDENT> self._playing = True <NEW_LINE> self._braviarc.media_play()
Send play command.
625941b7ac7a0e7691ed3f13
def delete_language_pair(self, price_list_id, source_language, target_language, **kwargs): <NEW_LINE> <INDENT> kwargs['_return_http_data_only'] = True <NEW_LINE> if kwargs.get('async_req'): <NEW_LINE> <INDENT> return self.delete_language_pair_with_http_info(price_list_id, source_language, target_language, **kwargs) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> (data) = self.delete_language_pair_with_http_info(price_list_id, source_language, target_language, **kwargs) <NEW_LINE> return data
Remove language pair. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_language_pair(price_list_id, source_language, target_language, async_req=True) >>> result = thread.get() :param async_req bool :param int price_list_id: (required) :param str source_language: (required) :param str target_language: (required) :return: None If the method is called asynchronously, returns the request thread.
625941b721a7993f00bc7b23
def test_domain_pair_counts(): <NEW_LINE> <INDENT> print(f'test_cohd_covid_io: testing /metadata/domainPairCounts on {cr.server}..... ') <NEW_LINE> json, df = cr.domain_pair_counts(dataset_id=2) <NEW_LINE> schema = [_s('dataset_id', int), _s('domain_id_1', str), _s('domain_id_2', str), _s('count', int)] <NEW_LINE> check_results_schema(json, schema) <NEW_LINE> assert len(json['results']) == 6 <NEW_LINE> expected_results = [ { "count": 144303, "dataset_id": 2, "domain_id_1": "Condition", "domain_id_2": "Condition" }, { "count": 182128, "dataset_id": 2, "domain_id_1": "Drug", "domain_id_2": "Procedure" } ] <NEW_LINE> check_result_values(json, expected_results) <NEW_LINE> print('...passed')
Check the /metadata/domainPairCounts endpoint for dataset 2 Checks the response json conforms to the expected schema and includes the expected results (see expected_results).
625941b7a8ecb033257d2f10
def uuid_and_instance(server): <NEW_LINE> <INDENT> if isinstance(server, Server): <NEW_LINE> <INDENT> return server.uuid, server <NEW_LINE> <DEDENT> return server, None
server => uuid, instance
625941b7d18da76e2353230b
def _get_blueprint_path(self, blueprint): <NEW_LINE> <INDENT> return os.path.join(os.path.dirname(__file__), 'blueprint', blueprint)
get the path to a blueprint in the blueprints dir
625941b7f548e778e58cd3b5
def __sub__(self, other): <NEW_LINE> <INDENT> sub_square = self.side - other.side <NEW_LINE> return Carre(sub_square)
Method for subtracting two squares (returns a new Carre whose side is the difference of the two sides).
625941b73317a56b86939aa3
def cleanup_apps(soa_dir): <NEW_LINE> <INDENT> log.info("Loading marathon configuration") <NEW_LINE> marathon_config = marathon_tools.load_marathon_config() <NEW_LINE> log.info("Connecting to marathon") <NEW_LINE> client = marathon_tools.get_marathon_client(marathon_config.get_url(), marathon_config.get_username(), marathon_config.get_password()) <NEW_LINE> valid_services = get_services_for_cluster(instance_type='marathon', soa_dir=soa_dir) <NEW_LINE> running_app_ids = marathon_tools.list_all_marathon_app_ids(client) <NEW_LINE> for app_id in running_app_ids: <NEW_LINE> <INDENT> log.debug("Checking app id %s", app_id) <NEW_LINE> try: <NEW_LINE> <INDENT> service, instance, _, __ = marathon_tools.deformat_job_id(app_id) <NEW_LINE> <DEDENT> except InvalidJobNameError: <NEW_LINE> <INDENT> log.warn("%s doesn't conform to paasta naming conventions? Skipping." % app_id) <NEW_LINE> continue <NEW_LINE> <DEDENT> if (service, instance) not in valid_services: <NEW_LINE> <INDENT> delete_app(app_id, client)
Clean up old or invalid jobs/apps from marathon. Retrieves both a list of apps currently in marathon and a list of valid app ids in order to determine what to kill. :param soa_dir: The SOA config directory to read from
625941b7925a0f43d2549cad
def get_reachable_boats(self,index): <NEW_LINE> <INDENT> pawn_moves = self.objects[index].moves <NEW_LINE> if pawn_moves == 0: <NEW_LINE> <INDENT> return numpy.array([]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if pawn_moves == 1: <NEW_LINE> <INDENT> reachable_land = numpy.array(index) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> reachable_land = numpy.append(self.get_connections([index], 'land_conn', pawn_moves-1),index) <NEW_LINE> <DEDENT> water_1 = self.get_connections(numpy.append(reachable_land,index), 'all_conn',1) <NEW_LINE> reachable_all = self.get_connections([index], 'all_conn', pawn_moves) <NEW_LINE> next_to_land = numpy.intersect1d(water_1, reachable_all) <NEW_LINE> reachable_boats = [] <NEW_LINE> for i in next_to_land: <NEW_LINE> <INDENT> if self.objects[i]: <NEW_LINE> <INDENT> if self.objects[i].owner == self.game.current_player and 'boat' in self.objects[i].label: <NEW_LINE> <INDENT> if not self.objects[i].occupying_pawn: <NEW_LINE> <INDENT> reachable_boats.append(i) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> return reachable_boats
Returns a list of indices for hexes containing a boardable boat for a pawn located at index.
625941b7a219f33f346287af
def load_arguments(self, _): <NEW_LINE> <INDENT> with self.argument_context('script') as c: <NEW_LINE> <INDENT> c.argument('script', options_list=['--script', '-s'], help='Script to load.') <NEW_LINE> c.argument('target', options_list=['--target', '-t'], help='Transpilation target.') <NEW_LINE> c.argument('output', options_list=['--output-file', '-of'], help='Output file to be generated.')
Load CLI Args for Knack parser
625941b771ff763f4b5494c8
def ew_prob(ews, zs, ew_assigner): <NEW_LINE> <INDENT> w0s = ew_assigner.w0_func(zs) <NEW_LINE> ews_rest = ews/(1.0 + zs) <NEW_LINE> prob_ew = exp(-0.95*ews_rest/w0s) - exp(-1.05*ews_rest/w0s) <NEW_LINE> prob_ew[ews_rest < 0.0] = 1.0 <NEW_LINE> return prob_ew
Return P(EW|EW_FUNCTION), the likelihood that an equivalent width +- 5% was drawn from a particular EW distribution Parameters ---------- ews, zs : array the equivalent widths and redshifts of the objects ew_assigner : simcat.equivalent_width:EquivalentWidthAssigner the distribution of equivalent widths Returns ------- prob : array P(EW|EW_FUNCTION)
625941b7097d151d1a222c96
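The expression in ew_prob is the probability mass of an exponential EW distribution with scale w0 between 0.95 and 1.05 of the rest-frame EW, i.e. a CDF difference. A quick numerical check (w0 and w are illustrative):

# exp(-0.95*w/w0) - exp(-1.05*w/w0) == CDF(1.05*w) - CDF(0.95*w)
# for an exponential distribution with scale w0.
import numpy as np

w0, w = 40.0, 25.0                      # illustrative scale and rest-frame EW
direct = np.exp(-0.95 * w / w0) - np.exp(-1.05 * w / w0)
cdf = lambda x: 1.0 - np.exp(-x / w0)   # exponential CDF
assert np.isclose(direct, cdf(1.05 * w) - cdf(0.95 * w))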
def verifiersetter(self, f): <NEW_LINE> <INDENT> self._verifiersetter = f
Register a function as the verifier setter. A verifier is better together with request token, but it is not required. A verifier is used together with request token for exchanging access token, it has an expire time, in this case, it would be a better design if you put them in a cache. The implemented code looks like:: @oauth.verifiersetter def save_verifier(verifier, token, *args, **kwargs): data = Verifier( verifier=verifier['oauth_verifier'], request_token=token, user=get_current_user() ) return data.save()
625941b730dc7b76659017a4
def start(self): <NEW_LINE> <INDENT> self._time = time.perf_counter()
Starts the *StopWatch*.
625941b70a50d4780f666cc9
def _description(self): <NEW_LINE> <INDENT> pass
Returns a description string.
625941b78e7ae83300e4ae05
def send_venue(token: str, chat_id: Union[int, str], latitude: float, longitude: float, title: str, address: str, foursquare_id: Optional[str] = None, foursquare_type: Optional[str] = None, disable_notification: Optional[bool] = None, reply_to_message_id: Optional[int] = None, reply_markup: Optional[ReplyMarkup] = None ) -> Message: <NEW_LINE> <INDENT> data = { 'chat_id': chat_id, 'latitude': latitude, 'longitude': longitude, 'title': title, 'address': address, 'foursquare_id': foursquare_id, 'foursquare_type': foursquare_type, 'disable_notification': disable_notification, 'reply_to_message_id': reply_to_message_id, 'reply_markup': reply_markup.unfold() if reply_markup else None } <NEW_LINE> return request( token, 'sendVenue', data, excpect='message' )
Use this method to send information about a venue. On success, the sent Message is returned.
625941b70fa83653e4656df7
def data_files(self): <NEW_LINE> <INDENT> tf_record_pattern = os.path.join(FLAGS.data_dir, '%s-*' % self.subset) <NEW_LINE> print(tf_record_pattern) <NEW_LINE> data_files = tf.gfile.Glob(tf_record_pattern) <NEW_LINE> if not data_files: <NEW_LINE> <INDENT> print('No files found for dataset %s/%s at %s' % (self.name, self.subset, FLAGS.data_dir)) <NEW_LINE> exit(-1) <NEW_LINE> <DEDENT> return data_files
Returns a python list of all (sharded) data subset files. Returns: python list of all (sharded) data set files. Raises: ValueError: if there are no data_files matching the subset.
625941b7a8370b77170526da
def getBans_async(self, _cb, current=None): <NEW_LINE> <INDENT> pass
Fetch all current IP bans on the server. Returns: List of bans.
625941b797e22403b379cdd2
def print_licences(params, metadata): <NEW_LINE> <INDENT> if hasattr(params, 'licenses'): <NEW_LINE> <INDENT> if params.licenses: <NEW_LINE> <INDENT> _pp(metadata.licenses_desc()) <NEW_LINE> <DEDENT> sys.exit(0)
Print licenses. :param argparse.Namespace params: parameter :param bootstrap_py.classifier.Classifiers metadata: package metadata
625941b730c21e258bdfa2d7
def should_include_for(self, userid, all_userids, is_finished, public_ranges): <NEW_LINE> <INDENT> return public_ranges or is_finished or self.user.userid == userid
Range actions are only shown to the user who makes them, while the game is running - at least in competition mode.
625941b7d58c6744b4257a9a
def format_seconds(secs, showms = False): <NEW_LINE> <INDENT> if secs == None: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> s = secs <NEW_LINE> d = (s - (s % 86400)) // 86400 <NEW_LINE> s -= d * 86400 <NEW_LINE> h = (s - (s % 3600)) // 3600 <NEW_LINE> s -= h * 3600 <NEW_LINE> m = (s - (s % 60)) // 60 <NEW_LINE> s -= m * 60 <NEW_LINE> formatted_duration = "" <NEW_LINE> if secs >= 86400: formatted_duration += "%id" % d <NEW_LINE> if secs >= 3600: formatted_duration += "%ih" % h <NEW_LINE> if secs >= 60: formatted_duration += "%im" % m <NEW_LINE> if showms: <NEW_LINE> <INDENT> formatted_duration += "%i%ss" % (s, _get_milliseconds_suffix(s)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> formatted_duration += "%is" % (s,) <NEW_LINE> <DEDENT> return formatted_duration
Return a string with a formatted duration (days, hours, mins, secs, ms) for pretty printing. :param secs: a duration in seconds (integer or float) (or None). :param showms: whether to show ms or not. Default False.
625941b7fb3f5b602dac34c8
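A few worked examples of the decomposition in format_seconds (86400 s per day, 3600 s per hour, 60 s per minute); a unit only appears once the duration reaches it:

# Expected outputs follow directly from the function above.
assert format_seconds(45) == "45s"
assert format_seconds(3661) == "1h1m1s"
assert format_seconds(90061) == "1d1h1m1s"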
def yes_shoot(self): <NEW_LINE> <INDENT> if not self.shooting: <NEW_LINE> <INDENT> sound.start_shoot() <NEW_LINE> self.shooting = True
Start the shoot sound.
625941b7d8ef3951e3243377
def register_file_with_db(self, fileobj): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> self.add_to_file_table(fileobj) <NEW_LINE> word_ids = self.add_to_dictionary_table(fileobj.name) <NEW_LINE> self.add_to_search_table(fileobj.uid, word_ids) <NEW_LINE> <DEDENT> except UnicodeEncodeError as e: <NEW_LINE> <INDENT> log.e("wrong encoding for filename '%s' (%s)", fileobj.relpath, e.__class__.__name__)
Add the data in the File object to the relevant tables in the media database.
625941b75fdd1c0f98dc006b
def test_youtube_recon_return_none_with_none_input(): <NEW_LINE> <INDENT> assert youtube_recon(None) is None
Test that the recon function returns None for None input.
625941b723849d37ff7b2ecc
def h_mle(xs,units='bits'): <NEW_LINE> <INDENT> p_hats = frequencies(xs) <NEW_LINE> return h(p_hats,units=units)
Compute MLE estimator of entropy
625941b7167d2b6e312189d8
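h_mle above assumes two helpers, frequencies and h, that are not shown in this dump; a minimal sketch of what they plausibly look like for a plug-in (MLE) entropy estimate:

# Hypothetical reconstructions of the helpers h_mle depends on.
from collections import Counter
import math

def frequencies(xs):
    # empirical probabilities p_hat(x) = count(x) / len(xs)
    n = len(xs)
    return [c / n for c in Counter(xs).values()]

def h(ps, units='bits'):
    base = 2 if units == 'bits' else math.e
    return -sum(p * math.log(p, base) for p in ps if p > 0)

# e.g. a balanced two-symbol sample has 1 bit of plug-in entropy:
# h(frequencies(['H', 'T', 'H', 'T'])) == 1.0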
def setUp(self): <NEW_LINE> <INDENT> self.app = create_app() <NEW_LINE> self.client = self.app.test_client <NEW_LINE> self.database_name = "trivia_test" <NEW_LINE> self.database_path = "postgresql://postgres:root@{}/{}".format('localhost:5432', self.database_name) <NEW_LINE> setup_db(self.app, self.database_path) <NEW_LINE> self.new_question = { 'question': 'new question', 'answer': 'new answer', 'difficulty': 1, 'category': 1 } <NEW_LINE> with self.app.app_context(): <NEW_LINE> <INDENT> self.db = SQLAlchemy() <NEW_LINE> self.db.init_app(self.app) <NEW_LINE> self.db.create_all()
Define test variables and initialize app.
625941b701c39578d7e74c7e
def signal(self, signal_type=np.float32): <NEW_LINE> <INDENT> timestamp_id = "timestamps" <NEW_LINE> signals_id = "signals" <NEW_LINE> if hasattr(self, timestamp_id): <NEW_LINE> <INDENT> timestamps = getattr(self, timestamp_id) <NEW_LINE> signals = getattr(self, signals_id) <NEW_LINE> return (timestamps, signals) <NEW_LINE> <DEDENT> event_pointer = self._signal_start_byte <NEW_LINE> self._reader.move_pointer_to(event_pointer) <NEW_LINE> event_length = 8 <NEW_LINE> for channel in range(self.number_of_channels): <NEW_LINE> <INDENT> event_length += self._get_variable_type(channel)[1] <NEW_LINE> <DEDENT> data_length = len(self._reader) <NEW_LINE> n_events = data_length - self._reader.current_pointer <NEW_LINE> n_events /= event_length <NEW_LINE> n_events = int(n_events-1) <NEW_LINE> timestamps = np.ndarray((n_events, ), dtype=datetime) <NEW_LINE> signals = np.ndarray((n_events, self.number_of_channels), dtype=signal_type) <NEW_LINE> i = 0 <NEW_LINE> while self._reader.current_pointer + event_length < data_length: <NEW_LINE> <INDENT> timestamp = self._reader.unpack("Q", 8) <NEW_LINE> timestamp = self._get_timestamp(timestamp) <NEW_LINE> timestamps[i] = timestamp <NEW_LINE> event_signal_data = [] <NEW_LINE> for channel in range(self.number_of_channels): <NEW_LINE> <INDENT> variable_type = self._get_variable_type(channel) <NEW_LINE> channel_data = self._reader.unpack(variable_type[0], variable_type[1]) <NEW_LINE> event_signal_data.append(channel_data) <NEW_LINE> <DEDENT> signals[i] = event_signal_data <NEW_LINE> i += 1 <NEW_LINE> <DEDENT> del self._reader <NEW_LINE> if i != n_events: <NEW_LINE> <INDENT> exit_status = "Read number of events (" <NEW_LINE> exit_status += str(i) + ") doesn't fit expectation (" <NEW_LINE> exit_status += str(n_events) + ")" <NEW_LINE> raise RuntimeError(exit_status) <NEW_LINE> <DEDENT> setattr(self, timestamp_id, timestamps) <NEW_LINE> setattr(self, signals_id, signals) <NEW_LINE> return (timestamps, signals)
Timestamps and signals in all channels. Args: signal_type (type): Type of signal. Returns: (list[datetime.datetime], list[list[np.any]]): Tuple containing the timestamps and the values for each channel at that timestamp.
625941b7c4546d3d9de7286a
def set_colour(self): <NEW_LINE> <INDENT> if self.HP / self.MAX_HP >= 0.75: <NEW_LINE> <INDENT> self.colour = LIGHT0 <NEW_LINE> <DEDENT> elif self.HP / self.MAX_HP >= 0.50: <NEW_LINE> <INDENT> self.colour = BRIGHT_YELLOW <NEW_LINE> <DEDENT> elif self.HP / self.MAX_HP >= 0.25: <NEW_LINE> <INDENT> self.colour = BRIGHT_ORANGE <NEW_LINE> <DEDENT> elif self.HP / self.MAX_HP < 0.25: <NEW_LINE> <INDENT> self.colour = BRIGHT_RED
Set the player colour based on health
625941b71b99ca400220a8eb
def collapse(self, queries): <NEW_LINE> <INDENT> return '\n\n'.join(queries)
Turn a sequence of queries into something executable. Parameters ---------- queries : List[str] Returns ------- query : str
625941b715fb5d323cde0943
def select_residential_detail_request(self): <NEW_LINE> <INDENT> url = 'http://isz.ishangzu.com/isz_house/ResidentialController/selectResidentialDetail.action' <NEW_LINE> data = {"residential_id":self.residential_id} <NEW_LINE> result = NewRequest(url, data).post()['obj'] <NEW_LINE> return result
Query residential complex information. :return: the 'obj' field of the POST response
625941b77d847024c06be0fa
def write_cluster(self, f, val): <NEW_LINE> <INDENT> for write_, value in zip(f, val): <NEW_LINE> <INDENT> if isinstance(write_, tuple): <NEW_LINE> <INDENT> write_[0](write_[1], value) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> write_(value)
Write LabVIEW cluster data type. Parameters ---------- f : iterable Cluster write function(s). val : iterable Cluster value(s). Returns ------- None
625941b730bbd722463cbbfd
def get_domain(self, domain_name): <NEW_LINE> <INDENT> if not self.prefab.core.file_exists("$JSCFGDIR/geodns/dns/%s.json" % domain_name): <NEW_LINE> <INDENT> raise Exception("domain_name not created") <NEW_LINE> <DEDENT> return self.ensure_domain(domain_name)
get domain object with dict of relevant records
625941b76aa9bd52df036bdc
def __init__( self, opp, schema, primary_value, zwave_config, device_config, registry ): <NEW_LINE> <INDENT> self._opp = opp <NEW_LINE> self._zwave_config = zwave_config <NEW_LINE> self._device_config = device_config <NEW_LINE> self._schema = copy.deepcopy(schema) <NEW_LINE> self._values = {} <NEW_LINE> self._entity = None <NEW_LINE> self._workaround_ignore = False <NEW_LINE> self._registry = registry <NEW_LINE> for name in self._schema[const.DISC_VALUES].keys(): <NEW_LINE> <INDENT> self._values[name] = None <NEW_LINE> self._schema[const.DISC_VALUES][name][const.DISC_INSTANCE] = [ primary_value.instance ] <NEW_LINE> <DEDENT> self._values[const.DISC_PRIMARY] = primary_value <NEW_LINE> self._node = primary_value.node <NEW_LINE> self._schema[const.DISC_NODE_ID] = [self._node.node_id] <NEW_LINE> for value in self._node.values.values(): <NEW_LINE> <INDENT> self.check_value(value) <NEW_LINE> <DEDENT> self._check_entity_ready()
Initialize the values object with the passed entity schema.
625941b7d53ae8145f87a0b1
def standEst(dataMat, user, simMeans, product): <NEW_LINE> <INDENT> m, n = shape(dataMat) <NEW_LINE> similary, score = 0, 0 <NEW_LINE> for i in range(n): <NEW_LINE> <INDENT> if dataMat[user, i] == 0: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> calVect = nonzero(logical_and(dataMat[:, i].A > 0, dataMat[:, product].A > 0))[0] <NEW_LINE> if len(calVect) == 0: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> similarity = simMeans(dataMat[calVect, product], dataMat[calVect, i]) <NEW_LINE> similary += similarity <NEW_LINE> score += similarity * dataMat[user, i] <NEW_LINE> <DEDENT> return score / similary if similary > 0 else 0
To estimate a user's rating for an item, first find the other items this user has rated, then compute a score from the similarity between those items and the target item: estimated rating = sum(similarity between an item and the target item * the user's rating for that item) / sum(similarities) :param dataMat: data set; rows are users, columns are items :param user: the user, i.e. the corresponding row in the data set :param simMeans: similarity measure, one of the three listed above :param product: the item whose rating is to be estimated, a column the user has not yet rated :return: the estimated rating for the item
625941b738b623060ff0ac29
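A tiny worked example of the weighted average standEst computes; the ratings matrix and similarity function are illustrative, not from the original module (which also star-imports numpy, supplying the shape/nonzero/logical_and names used above):

# Illustrative call; cosine similarity rescaled to [0, 1] as a simMeans.
from numpy import mat

def cos_sim(a, b):
    num = float(a.T * b)
    denom = (float(a.T * a) ** 0.5) * (float(b.T * b) ** 0.5)
    return 0.5 + 0.5 * num / denom

data = mat([[4, 4, 0],
            [3, 3, 0],
            [1, 1, 5],
            [2, 2, 4]])
# estimate user 0's rating for item 2 from their ratings of items 0 and 1
print(standEst(data, 0, cos_sim, 2))  # -> 4.0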
def test_create_volume_from_encrypted_volume(self): <NEW_LINE> <INDENT> self.mock_object(key_manager, 'API', fake_keymgr.fake_api) <NEW_LINE> cipher = 'aes-xts-plain64' <NEW_LINE> key_size = 256 <NEW_LINE> volume_api = cinder.volume.api.API() <NEW_LINE> ctxt = context.get_admin_context() <NEW_LINE> db.volume_type_create(ctxt, {'id': '61298380-0c12-11e3-bfd6-4b48424183be', 'name': 'LUKS'}) <NEW_LINE> db.volume_type_encryption_create( ctxt, '61298380-0c12-11e3-bfd6-4b48424183be', {'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER, 'cipher': cipher, 'key_size': key_size}) <NEW_LINE> db_vol_type = db.volume_type_get_by_name(context.get_admin_context(), 'LUKS') <NEW_LINE> volume_src = volume_api.create(self.context, 1, 'name', 'description', volume_type=db_vol_type) <NEW_LINE> db.volume_update(self.context, volume_src['id'], {'host': 'fake_host@fake_backend', 'status': 'available'}) <NEW_LINE> volume_src = objects.Volume.get_by_id(self.context, volume_src['id']) <NEW_LINE> volume_dst = volume_api.create(self.context, 1, 'name', 'description', source_volume=volume_src) <NEW_LINE> self.assertEqual(volume_dst['id'], db.volume_get(context.get_admin_context(), volume_dst['id']).id) <NEW_LINE> self.assertEqual(volume_src['id'], db.volume_get(context.get_admin_context(), volume_dst['id']).source_volid) <NEW_LINE> self.assertIsNotNone(volume_src['encryption_key_id']) <NEW_LINE> self.assertIsNotNone(volume_dst['encryption_key_id']) <NEW_LINE> km = volume_api.key_manager <NEW_LINE> volume_src_key = km.get(self.context, volume_src['encryption_key_id']) <NEW_LINE> volume_dst_key = km.get(self.context, volume_dst['encryption_key_id']) <NEW_LINE> self.assertEqual(volume_src_key, volume_dst_key)
Test volume can be created from an encrypted volume.
625941b7cb5e8a47e48b78ea
def visualize_predictions(forecaster, output_dir, data = None): <NEW_LINE> <INDENT> if data is None: <NEW_LINE> <INDENT> data = TimeSeriesData() <NEW_LINE> <DEDENT> date_keys = sorted(data.train.Date.unique()) <NEW_LINE> try: <NEW_LINE> <INDENT> os.makedirs(output_dir) <NEW_LINE> <DEDENT> except FileExistsError: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> row_ids = list(range(1, data.time_count)) <NEW_LINE> plot_rows(data, forecaster, row_ids, "all", output_dir) <NEW_LINE> for week_day, name in enumerate(["Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday", "Monday"]): <NEW_LINE> <INDENT> row_ids = data.train.index[data.train.Date.isin(date_keys[week_day::7])] <NEW_LINE> plot_rows(data, forecaster, row_ids, week_day, output_dir) <NEW_LINE> <DEDENT> store_id = np.random.randint(1, data.store_count) <NEW_LINE> row_ids = data.train.index[data.train.Store == store_id] <NEW_LINE> plot_rows(data=data, forecaster=forecaster, row_ids=row_ids, name="Store-{}".format(store_id), output_dir=output_dir)
visualizes predictions for a forecaster :param forecaster: AbstractForecaster or str where to load a forecaster :param output_dir: str where to save the plots Visualizations: - Avg prediction and error per day - predictions and error for a random store
625941b74a966d76dd550e46
def publish(self, user): <NEW_LINE> <INDENT> data = self.data <NEW_LINE> content = data['content'] <NEW_LINE> weibo_obj = Weibo( content=content, user=user, created_at=datetime.now() ) <NEW_LINE> db.session.add(weibo_obj) <NEW_LINE> at_users = re.findall(constants.AT_USER_PATTEN, content, re.MULTILINE) <NEW_LINE> for nickname in at_users: <NEW_LINE> <INDENT> user = User.query.filter_by(nickname=nickname).first() <NEW_LINE> if user: <NEW_LINE> <INDENT> weibo_at_user = WeiboAtUser( weibo=weibo_obj, user_id=user.id ) <NEW_LINE> db.session.add(weibo_at_user) <NEW_LINE> <DEDENT> <DEDENT> topics = re.findall(constants.TOPIC_PATTEN, content, re.MULTILINE) <NEW_LINE> print (topics) <NEW_LINE> for name in topics: <NEW_LINE> <INDENT> topic = Topic.query.filter_by(name=name).first() <NEW_LINE> if topic is None: <NEW_LINE> <INDENT> topic = Topic( name=name, ) <NEW_LINE> db.session.add(topic) <NEW_LINE> <DEDENT> weibo_rel_topic = WeiboRelTopic( topic=topic, weibo=weibo_obj ) <NEW_LINE> db.session.add(weibo_rel_topic) <NEW_LINE> <DEDENT> db.session.commit() <NEW_LINE> return weibo_obj
Publish a weibo (microblog post).
625941b7b5575c28eb68de38
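publish above extracts @-mentions and #topics# with regex constants that are not shown; plausible (hypothetical) definitions and a quick demo:

# AT_USER_PATTEN and TOPIC_PATTEN are assumptions for weibo-style markup.
import re

AT_USER_PATTEN = r'@(\w+)'   # "@nickname" mentions
TOPIC_PATTEN = r'#(\w+)#'    # "#topic#" hashtags

content = '@alice check out #python# and #flask#'
print(re.findall(AT_USER_PATTEN, content, re.MULTILINE))  # ['alice']
print(re.findall(TOPIC_PATTEN, content, re.MULTILINE))    # ['python', 'flask']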
def file_handler(object_type: str, model_type: Optional[str] = None) -> str: <NEW_LINE> <INDENT> config = get_config() <NEW_LINE> parent_file = Path(__file__).parent.parent / "ml" <NEW_LINE> if model_type: <NEW_LINE> <INDENT> extension = config["file_paths"][object_type][model_type] <NEW_LINE> file_path = parent_file / extension <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> file_path = config["file_paths"][object_type] <NEW_LINE> <DEDENT> return file_path
Find the correct file path for a given input
625941b76e29344779a62450
def main(): <NEW_LINE> <INDENT> log_path = Path(LOG_FLODER, f'LibraryMonitor_{os.getpid()}.log') <NEW_LINE> if not log_path.parent.exists(): <NEW_LINE> <INDENT> log_path.parent.mkdir(parents=True) <NEW_LINE> <DEDENT> set_logger(log_path) <NEW_LINE> monitor = LibraryMonitor(bot_token=BOT_TOKEN) <NEW_LINE> interval_counter = 0 <NEW_LINE> while True: <NEW_LINE> <INDENT> interval_counter += 1 <NEW_LINE> monitor.run() <NEW_LINE> if interval_counter == REPORT_CYCLE: <NEW_LINE> <INDENT> monitor.report_status() <NEW_LINE> interval_counter = 0 <NEW_LINE> <DEDENT> logging.info(f"LibraryMonitor: Sleep for `{SYNC_INTERVAL}`s.") <NEW_LINE> time.sleep(SYNC_INTERVAL) <NEW_LINE> <DEDENT> monitor.stop() <NEW_LINE> logging.warning("LibraryMonitor: Exits.")
Main process.
625941b7f8510a7c17cf953f
def test_softwaresByAddonUverName(self): <NEW_LINE> <INDENT> softwares = self.__getSoftwares() <NEW_LINE> query = Query(softwares) <NEW_LINE> softwareList = query.softwaresByAddonUverName('UVER_A_VERSION') <NEW_LINE> self.assertEqual(len(softwareList), 2) <NEW_LINE> for software in softwareList: <NEW_LINE> <INDENT> self.assertIn(software.name(), ['B', 'C'])
Should return a list of softwares based on the addon uver name.
625941b76aa9bd52df036bdd
def main(): <NEW_LINE> <INDENT> return 1
Main function of the controller. :returns: always 1 if there are no syntax errors
625941b79b70327d1c4e0c0e
def calc_all_updates_new(self, rounds=1, FixedNodes={}): <NEW_LINE> <INDENT> N = len(self.nodeDict)-len(FixedNodes) <NEW_LINE> names = [node_name for node_name in self.nodeDict.keys() if not node_name in FixedNodes.keys()] <NEW_LINE> start_values = list(product(range(2), repeat=N)) <NEW_LINE> self.assign_values_to_nodes(FixedNodes) <NEW_LINE> result_values = [] <NEW_LINE> full_start_values = [] <NEW_LINE> for sv_idx, sv in enumerate(start_values): <NEW_LINE> <INDENT> value_dict = {x:sv[i] for i, x in enumerate(names)} <NEW_LINE> self.assign_values_to_nodes(value_dict) <NEW_LINE> full_start_values.append(list(self.CurrentValues.values())) <NEW_LINE> for i in range(rounds): <NEW_LINE> <INDENT> self.update_all(FixedNodes=FixedNodes) <NEW_LINE> <DEDENT> result_values.append(list(self.CurrentValues.values())) <NEW_LINE> <DEDENT> start = np.array(full_start_values) <NEW_LINE> stop = np.array(result_values) <NEW_LINE> return start, stop
Same functionality as calc_all_updates, but without the ability to save and retrieve previously calculated values from the cache. For documentation see the docstring of calc_all_updates.
625941b78e05c05ec3eea1ac
def xshape(self): <NEW_LINE> <INDENT> return self.values[SHAPE]
Return the shape of this array.
625941b79f2886367277a6cc
def test_solve_N(self): <NEW_LINE> <INDENT> self.assertFalse(Arithmetic('1 2 3', '6 N').solve() == self.N_NP) <NEW_LINE> self.assertFalse(Arithmetic('1 2 3', '5 N').solve() == self.N_NP) <NEW_LINE> self.assertFalse(Arithmetic('1 2 3', '7 N').solve() == self.N_NP) <NEW_LINE> self.assertFalse(Arithmetic('1 2 3 4', '10 N').solve() == self.N_NP) <NEW_LINE> self.assertFalse(Arithmetic('1 2 3 4', '9 N').solve() == self.N_NP) <NEW_LINE> self.assertFalse(Arithmetic('1 2 3 4', '11 N').solve() == self.N_NP) <NEW_LINE> self.assertFalse(Arithmetic('1 2 3 4', '15 N').solve() == self.N_NP) <NEW_LINE> self.assertFalse(Arithmetic('1 2 3 4', '14 N').solve() == self.N_NP) <NEW_LINE> self.assertFalse(Arithmetic('1 2 3 4', '24 N').solve() == self.N_NP) <NEW_LINE> self.assertFalse(Arithmetic('1 2 3 4 5', '30 N').solve() == self.N_NP) <NEW_LINE> self.assertFalse(Arithmetic('1 2 3 4 5 6', '33 N').solve() == self.N_NP) <NEW_LINE> self.assertFalse(Arithmetic('1 2 3 4 5 6', '127 N').solve() == self.N_NP) <NEW_LINE> self.assertFalse(Arithmetic('2 11 8 15', '45 N').solve() == self.N_NP) <NEW_LINE> self.assertTrue(Arithmetic('1 2 3 4 5', '10 N').solve() == self.N_NP)
Are N ordered problems solved correctly?
625941b74f6381625f114881
@click.command() <NEW_LINE> @click.option('--core', default='', help='The location of Processing.R source code.') <NEW_LINE> @click.option('--jar', default='', help='The location of runner.jar') <NEW_LINE> @click.option('--docs-dir', default='', help='The location of Processing.R docs') <NEW_LINE> def generate(core, jar, docs_dir): <NEW_LINE> <INDENT> if core is None or jar is None: <NEW_LINE> <INDENT> click.echo('There is no core or jar.') <NEW_LINE> exit(1) <NEW_LINE> <DEDENT> click.echo('The location of Processing.R source code:%s' % core) <NEW_LINE> click.echo('The location of Processing.R runner.jar: %s' % jar) <NEW_LINE> click.echo('The location of Processing.R docs: %s' % docs_dir) <NEW_LINE> template_dir_short = 'templates' <NEW_LINE> output_reference_dir_short = 'docs/reference' <NEW_LINE> output_tutorial_dir_short = 'docs/tutorials' <NEW_LINE> output_dir_short = 'docs' <NEW_LINE> content_dir_short = 'content' <NEW_LINE> tutorials_dir_short = 'tutorials' <NEW_LINE> global template_dir <NEW_LINE> global output_dir <NEW_LINE> global content_dir <NEW_LINE> global tutorials_dir <NEW_LINE> global output_reference_dir <NEW_LINE> global output_tutorials_dir <NEW_LINE> template_dir = os.path.join(docs_dir, template_dir_short) <NEW_LINE> output_dir = os.path.join(docs_dir, output_dir_short) <NEW_LINE> content_dir = os.path.join(docs_dir, content_dir_short) <NEW_LINE> tutorials_dir = os.path.join(docs_dir, tutorials_dir_short) <NEW_LINE> output_reference_dir = os.path.join(docs_dir, output_reference_dir_short) <NEW_LINE> output_tutorials_dir = os.path.join(docs_dir, output_tutorial_dir_short) <NEW_LINE> env = jinja2.Environment(loader=jinja2.FileSystemLoader( template_dir), trim_blocks='true') <NEW_LINE> generator = Generator(core, env, jar) <NEW_LINE> generator.generate()
Generate Processing.R web reference.
625941b721bff66bcd684790
def items(self, prog): <NEW_LINE> <INDENT> self.current_prog = prog <NEW_LINE> if prog.context == 'more_programs': <NEW_LINE> <INDENT> return [] <NEW_LINE> <DEDENT> return [(self.more_programs, _('Search for more of this program'))]
Get actions for prog.
625941b75fc7496912cc37c1
def delete_trust(self, trust, ignore_missing=False): <NEW_LINE> <INDENT> self._delete(_trust.Trust, trust, ignore_missing=ignore_missing)
Delete a trust :param trust: The value can be either the ID of a trust or a :class:`~ecl.identity.v3.trust.Trust` instance. :param bool ignore_missing: When set to ``False`` :class:`~ecl.exceptions.ResourceNotFound` will be raised when the credential does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent credential. :returns: ``None``
625941b7cdde0d52a9e52e69
def column_order(self): <NEW_LINE> <INDENT> return ((1, 4), (1, 0), (1, 1), (1, 2), (1, 3))
Return the column order of the columns in the display tab.
625941b79b70327d1c4e0c0f
def __data_generation(self, list_IDs_temp): <NEW_LINE> <INDENT> X = np.empty((self.batch_size, *self.dim)) <NEW_LINE> for i, ID in enumerate(list_IDs_temp): <NEW_LINE> <INDENT> X[i,] = np.load(ID, allow_pickle=True) <NEW_LINE> <DEDENT> if self.save_y: <NEW_LINE> <INDENT> self.y = np.append(self.y, X[:,0,-1]) <NEW_LINE> <DEDENT> if self.transpose: <NEW_LINE> <INDENT> xT = np.zeros((self.batch_size,1,X[0,].shape[1]-1,X[0,].shape[0])) <NEW_LINE> for i in range(self.batch_size): <NEW_LINE> <INDENT> xT[i,0,:,:] = X[i,:,:-1].T <NEW_LINE> <DEDENT> return xT, np.array(X[:,0,-1]) <NEW_LINE> <DEDENT> if self.transpose1D: <NEW_LINE> <INDENT> xT = np.zeros((self.batch_size,X[0,].shape[1]-1,X[0,].shape[0])) <NEW_LINE> for i in range(self.batch_size): <NEW_LINE> <INDENT> xT[i,:,:] = X[i,:,:-1].T <NEW_LINE> <DEDENT> return xT, np.array(X[:,0,-1]) <NEW_LINE> <DEDENT> return X[:,:,:-1], np.array(X[:,0,-1])
Generates data containing batch_size samples
625941b785dfad0860c3ac94
def _initNoteDictionnary(self): <NEW_LINE> <INDENT> strings = self._maxStrings <NEW_LINE> frets = self._maxFrets <NEW_LINE> dict = [[0 for i in range(frets)] for i in range(strings)] <NEW_LINE> dict[0][0] = (5, 4) <NEW_LINE> dict[1][0] = (12, 3) <NEW_LINE> dict[2][0] = (8, 3) <NEW_LINE> dict[3][0] = (3, 3) <NEW_LINE> dict[4][0] = (10, 2) <NEW_LINE> dict[5][0] = (5, 2) <NEW_LINE> for i in range(strings): <NEW_LINE> <INDENT> for j in range(1, frets): <NEW_LINE> <INDENT> baseNote = dict[i][j - 1] <NEW_LINE> octave = baseNote[1] <NEW_LINE> note = baseNote[0] + 1 <NEW_LINE> if baseNote[0] + 1 > 12: <NEW_LINE> <INDENT> octave += 1 <NEW_LINE> note = 0 <NEW_LINE> <DEDENT> dict[i][j] = (note, octave) <NEW_LINE> <DEDENT> <DEDENT> return dict
Initialise a note dictionary representing the guitar.
625941b731939e2706e4ccab
def testTrillExtensionA(self): <NEW_LINE> <INDENT> from music21 import stream, note, chord, expressions <NEW_LINE> from music21.musicxml import m21ToString <NEW_LINE> s = stream.Stream() <NEW_LINE> s.repeatAppend(note.Note(), 12) <NEW_LINE> n1 = s.notes[0] <NEW_LINE> n2 = s.notes[-1] <NEW_LINE> sp1 = expressions.TrillExtension(n1, n2) <NEW_LINE> s.append(sp1) <NEW_LINE> raw = m21ToString.fromMusic21Object(s) <NEW_LINE> self.assertEqual(raw.count('wavy-line'), 2) <NEW_LINE> s = stream.Stream() <NEW_LINE> s.repeatAppend(chord.Chord(['c-3', 'g4']), 12) <NEW_LINE> n1 = s.notes[0] <NEW_LINE> n2 = s.notes[-1] <NEW_LINE> sp1 = expressions.TrillExtension(n1, n2) <NEW_LINE> s.append(sp1) <NEW_LINE> raw = m21ToString.fromMusic21Object(s) <NEW_LINE> self.assertEqual(raw.count('wavy-line'), 2)
Test basic wave line creation and output, as well as passing objects through make measure calls.
625941b763d6d428bbe4432a
def sawtooth(self, vertices_array, num_teeth, spike_side = 1, spike_dir = 1): <NEW_LINE> <INDENT> self.vertices = np.asarray(vertices_array) <NEW_LINE> self.vdist = pdist(self.vertices) <NEW_LINE> self.width = self.vdist / num_teeth <NEW_LINE> self.height = self.width <NEW_LINE> self.side = spike_side <NEW_LINE> self.dir = spike_dir <NEW_LINE> self.x_v2 = np.asarray(vertices_array)[:,0] <NEW_LINE> self.y_v2 = np.asarray(vertices_array)[:,1] <NEW_LINE> self.theta = 0 <NEW_LINE> self.norm = normal(self.x_v2, self.y_v2, self.side) <NEW_LINE> self.norm = self.norm * length(self.norm)**(-1) <NEW_LINE> self.trig_wall_x_v = self.x_v2 + self.norm[0] * (self.height + 1.1 * max(radii)) <NEW_LINE> self.trig_wall_y_v = self.y_v2 + self.norm[1] * (self.height + 1.1 * max(radii)) <NEW_LINE> self.angle_sign = np.sign((self.y_v2[1]-self.y_v2[0])/(self.x_v2[1]-self.x_v2[0])) <NEW_LINE> if self.angle_sign == 0: <NEW_LINE> <INDENT> self.angle_sign = 1 <NEW_LINE> <DEDENT> c = 1 <NEW_LINE> if self.y_v2[1] < self.y_v2[0]: <NEW_LINE> <INDENT> if self.x_v2[1] != self.x_v2[0]: <NEW_LINE> <INDENT> c = -1 <NEW_LINE> <DEDENT> <DEDENT> self.y_v = np.linspace(0, self.vdist, self.vdist * 1000) <NEW_LINE> self.x_v = self.side*0.5*self.height*(signal.sawtooth(2 * np.pi * self.width**(-1) * self.y_v) + 1) <NEW_LINE> if self.dir == -1: <NEW_LINE> <INDENT> self.x_v = self.x_v[::-1] <NEW_LINE> <DEDENT> if self.x_v2[0] != self.x_v2[1]: <NEW_LINE> <INDENT> self.theta = findangle(np.array([self.x_v2[1]-self.x_v2[0], self.y_v2[1]-self.y_v2[0]])) <NEW_LINE> self.x_prime = self.x_v * math.cos(self.theta) + c*self.angle_sign * self.y_v * math.sin(self.theta) <NEW_LINE> self.y_prime = (-1) * c*self.angle_sign * self.x_v * math.sin(self.theta) + self.y_v * math.cos(self.theta) <NEW_LINE> <DEDENT> elif self.x_v2[0] == self.x_v2[1]: <NEW_LINE> <INDENT> self.x_prime = self.x_v <NEW_LINE> self.y_prime = self.y_v <NEW_LINE> <DEDENT> if self.angle_sign == 1: <NEW_LINE> <INDENT> self.x_fin = c * self.x_prime + min(self.x_v2) <NEW_LINE> self.y_fin = c * self.y_prime + min(self.y_v2) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.x_fin = c * self.x_prime + max(self.x_v2) <NEW_LINE> self.y_fin = c * self.y_prime + min(self.y_v2)
Input: two vertices to create the wall between, as a list [[Xo,Yo],[X1,Y1]]; the number of triangular teeth as an integer; the side to put spikes on (1 = right, -1 = left); and the direction the spikes point in (-1 reverses).
625941b7187af65679ca4f58
def is_complete_intersection(self): <NEW_LINE> <INDENT> singular.lib("sing.lib") <NEW_LINE> I = singular.simplify(self.defining_ideal(), 10) <NEW_LINE> L = singular.is_ci(I).sage() <NEW_LINE> return len(self.ambient_space().gens()) - len(I.sage().gens()) == L[-1]
Return whether this projective curve is or is not a complete intersection. OUTPUT: Boolean. EXAMPLES:: sage: P.<x,y,z,w> = ProjectiveSpace(QQ, 3) sage: C = Curve([x*y - z*w, x^2 - y*w, y^2*w - x*z*w], P) sage: C.is_complete_intersection() False :: sage: P.<x,y,z,w> = ProjectiveSpace(QQ, 3) sage: C = Curve([y*w - x^2, z*w^2 - x^3], P) sage: C.is_complete_intersection() True sage: P.<x,y,z,w> = ProjectiveSpace(QQ, 3) sage: C = Curve([z^2 - y*w, y*z - x*w, y^2 - x*z], P) sage: C.is_complete_intersection() False
625941b7442bda511e8be261
def get_nsheets(self): <NEW_LINE> <INDENT> return self._book.nsheets
Return the number of sheets in the workbook.
625941b7dc8b845886cb536f
def sunos_cpuinfo(): <NEW_LINE> <INDENT> ret = {} <NEW_LINE> ret["isainfo"] = {} <NEW_LINE> for line in __salt__["cmd.run"]("isainfo -x").splitlines(): <NEW_LINE> <INDENT> if not line: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> comps = line.split(":") <NEW_LINE> comps[0] = comps[0].strip() <NEW_LINE> ret["isainfo"][comps[0]] = sorted(comps[1].strip().split()) <NEW_LINE> <DEDENT> ret["psrinfo"] = [] <NEW_LINE> procn = None <NEW_LINE> for line in __salt__["cmd.run"]("psrinfo -v -p").splitlines(): <NEW_LINE> <INDENT> if not line: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> if line.startswith("The physical processor"): <NEW_LINE> <INDENT> procn = len(ret["psrinfo"]) <NEW_LINE> line = line.split() <NEW_LINE> ret["psrinfo"].append({}) <NEW_LINE> if "cores" in line: <NEW_LINE> <INDENT> ret["psrinfo"][procn]["topology"] = {} <NEW_LINE> ret["psrinfo"][procn]["topology"]["cores"] = _number(line[4]) <NEW_LINE> ret["psrinfo"][procn]["topology"]["threads"] = _number(line[7]) <NEW_LINE> <DEDENT> elif "virtual" in line: <NEW_LINE> <INDENT> ret["psrinfo"][procn]["topology"] = {} <NEW_LINE> ret["psrinfo"][procn]["topology"]["threads"] = _number(line[4]) <NEW_LINE> <DEDENT> <DEDENT> elif line.startswith(" " * 6): <NEW_LINE> <INDENT> ret["psrinfo"][procn]["name"] = line.strip() <NEW_LINE> <DEDENT> elif line.startswith(" " * 4): <NEW_LINE> <INDENT> line = line.strip().split() <NEW_LINE> ret["psrinfo"][procn]["vendor"] = line[1][1:] <NEW_LINE> ret["psrinfo"][procn]["family"] = _number(line[4]) <NEW_LINE> ret["psrinfo"][procn]["model"] = _number(line[6]) <NEW_LINE> ret["psrinfo"][procn]["step"] = _number(line[8]) <NEW_LINE> ret["psrinfo"][procn]["clock"] = "{} {}".format(line[10], line[11][:-1]) <NEW_LINE> <DEDENT> <DEDENT> return ret
SunOS-specific cpuinfo implementation.
625941b750485f2cf553cbd4
def market_last_split_train(market=None): <NEW_LINE> <INDENT> if market is None: <NEW_LINE> <INDENT> market = ABuEnv.g_market_target <NEW_LINE> <DEDENT> market_name = market.value <NEW_LINE> last_path_train = '{}_{}'.format(K_MARKET_TRAIN_FN_BASE, market_name) <NEW_LINE> if not ABuFileUtil.file_exist(last_path_train): <NEW_LINE> <INDENT> raise RuntimeError('g_enable_last_split_train not ZCommonUtil.fileExist(fn)!') <NEW_LINE> <DEDENT> market_symbols = ABuFileUtil.load_pickle(last_path_train) <NEW_LINE> return market_symbols
Use the training-set symbols data from the last completed split. :param market: market for which to fetch the training set, e.g. EMarketTargetType.E_MARKET_TARGET_US :return: the training-set symbols data from the last completed split
625941b7090684286d50eb1b
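A hedged usage sketch, assuming abupy's EMarketTargetType enum is importable and that a train/test split pickle has already been written for the target market:

    from abupy import EMarketTargetType

    # Explicit market; passing None falls back to ABuEnv.g_market_target.
    train_symbols = market_last_split_train(EMarketTargetType.E_MARKET_TARGET_US)
    print(len(train_symbols))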
def p_for_op(p): <NEW_LINE> <INDENT> p[0] = None
for_op : PLUS varcte | MINUS varcte
625941b799fddb7c1c9de1ce
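Context for the entry above: in PLY, the docstring of a p_* function is the grammar production itself, which is why the `for_op : PLUS varcte | MINUS varcte` text must be kept verbatim. Below is a hedged sketch of the surrounding parser scaffolding; the companion varcte rule and token names are assumptions drawn from the fragment, not the original grammar.

    import ply.yacc as yacc

    def p_varcte(p):
        """varcte : ID
                  | CTE_INT"""
        p[0] = p[1]

    def p_error(p):
        print('Syntax error at', p)

    # parser = yacc.yacc()  # builds tables from every p_* docstring rule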
def get_model(url): <NEW_LINE> <INDENT> _ensure_absolute(url) <NEW_LINE> return MODEL_URL_TO_SLUMBER_MODEL[url]
Return the client model connector for a given URL.
625941b7fbf16365ca6f5ff7
def patched_cause_emd(d1, d2): <NEW_LINE> <INDENT> global n_repertoires <NEW_LINE> purview_size = pyphi.utils.purview_size(d1) <NEW_LINE> repertoires[purview_size].append([d1, d2]) <NEW_LINE> n_repertoires += 1 <NEW_LINE> if n_repertoires == MAX_REPERTOIRES: <NEW_LINE> <INDENT> report() <NEW_LINE> <DEDENT> return _CAUSE_EMD(d1, d2)
Patched cause EMD which records all repertoire arguments in the `repertoires` variable.
625941b745492302aab5e0fb
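A hedged sketch of the monkey-patching scaffold the helper above implies. The module globals and, in particular, the exact patch point inside pyphi are assumptions and will vary by pyphi version:

    from collections import defaultdict
    import pyphi

    MAX_REPERTOIRES = 1000
    n_repertoires = 0
    repertoires = defaultdict(list)

    # Keep a reference to the real EMD, then install the recording wrapper.
    # The attribute path below is an assumed patch point, not pyphi public API.
    _CAUSE_EMD = pyphi.subsystem.cause_emd
    pyphi.subsystem.cause_emd = patched_cause_emd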
def setgeo(rundata): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> geo_data = rundata.geo_data <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> print("*** Error, this rundata has no geo_data attribute") <NEW_LINE> raise AttributeError("Missing geo_data attribute") <NEW_LINE> <DEDENT> geo_data.gravity = 9.81 <NEW_LINE> geo_data.coordinate_system = 1 <NEW_LINE> geo_data.earth_radius = 6367.5e3 <NEW_LINE> geo_data.coriolis_forcing = False <NEW_LINE> geo_data.dry_tolerance = 1.e-1 <NEW_LINE> geo_data.friction_forcing = True <NEW_LINE> geo_data.manning_coefficient = 0.025 <NEW_LINE> geo_data.friction_depth = 20.0 <NEW_LINE> geo_data.rho = [0.9, 1.0] <NEW_LINE> refinement_data = rundata.refinement_data <NEW_LINE> refinement_data.wave_tolerance = 1.e-2 <NEW_LINE> refinement_data.deep_depth = 1e2 <NEW_LINE> refinement_data.max_level_deep = 3 <NEW_LINE> refinement_data.variable_dt_refinement_ratios = True <NEW_LINE> topo_data = rundata.topo_data <NEW_LINE> topo_data.topofiles.append([2, 1, 3, 0., 1.e10, 'bowl.topotype2']) <NEW_LINE> dtopo_data = rundata.dtopo_data <NEW_LINE> rundata.qinit_data.qinit_type = 4 <NEW_LINE> rundata.qinit_data.qinitfiles = [] <NEW_LINE> rundata.qinit_data.qinitfiles.append([1, 3, 'hump.xyz']) <NEW_LINE> fixedgrids = rundata.fixed_grid_data.fixedgrids <NEW_LINE> return rundata
Set GeoClaw-specific runtime parameters. For documentation see ....
625941b7d164cc6175782b89
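A hedged usage sketch following the usual GeoClaw setrun.py pattern, where setgeo() is called at the end of setrun() on an existing rundata object:

    from clawpack.clawutil import data

    def setrun(claw_pkg='geoclaw'):
        rundata = data.ClawRunData(claw_pkg, num_dim=2)
        # ... set clawdata / amrdata options here ...
        rundata = setgeo(rundata)  # attach the GeoClaw-specific parameters
        return rundata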
def _create_auxillary_rewards_model(self, shared_state, action_input_shape, num_aux_rewards): <NEW_LINE> <INDENT> action = Sequential([ Dense(shared_state.layers[-1].output_shape[1], activation='relu',input_shape=action_input_shape, name='auxillary_dense_1'), ]) <NEW_LINE> mult = Multiply()([action.output, shared_state.output]) <NEW_LINE> merged = Dense(64, activation='relu', name='auxillary_merged_dense')(mult) <NEW_LINE> merged = Dense(32, activation='relu', name='auxillary_dense')(merged) <NEW_LINE> merged = Dense(num_aux_rewards, activation='tanh', name='auxillary_out')(merged) <NEW_LINE> model = Model(inputs=[shared_state.input, action.input], outputs=merged) <NEW_LINE> opt = optimizers.Adam(clipnorm=1.) <NEW_LINE> model.compile(optimizer=opt, loss='logcosh') <NEW_LINE> return model
Create a separate 'head' for predicting domain knowledge, a.k.a. auxiliary features. "Hybrid Reward Architecture for Reinforcement Learning", https://arxiv.org/pdf/1706.04208.pdf, van Seijen et al. "Reinforcement Learning with Unsupervised Auxiliary Tasks", https://arxiv.org/pdf/1611.05397.pdf, Jaderberg et al. The second head is trained on domain knowledge of some kind, such as pixel control or other auxiliary tasks.
625941b726068e7796caeb14
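A hedged usage sketch for the auxiliary head above; the shared-state encoder, the input shapes, and the owning agent object are all assumptions for illustration:

    import numpy as np
    from keras.models import Sequential
    from keras.layers import Dense

    # Hypothetical shared torso: 84-dim state -> 128-dim feature vector.
    shared_state = Sequential([Dense(128, activation='relu', input_shape=(84,))])
    aux_model = agent._create_auxillary_rewards_model(
        shared_state, action_input_shape=(4,), num_aux_rewards=3)
    # Predict auxiliary rewards for one (state, one-hot action) pair.
    preds = aux_model.predict([np.zeros((1, 84)), np.zeros((1, 4))])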
def makeCsrTriSolveRowLoopBounds (upLo, startRow, endRowPlusOne): <NEW_LINE> <INDENT> if upLo == 'upper': <NEW_LINE> <INDENT> return 'Ordinal r = ' + endRowPlusOne + ' - 1; r >= ' + startRow + '; --r' <NEW_LINE> <DEDENT> elif upLo == 'lower': <NEW_LINE> <INDENT> return 'Ordinal r = ' + startRow + '; r < ' + endRowPlusOne + '; ++r' <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError ('Invalid upLo "' + upLo + '"')
Make loop bounds for CSR sparse triangular solve's loop over rows. CSR stands for "compressed sparse row," the sparse matrix storage format that this code expects. upLo: 'lower' if generating loop bounds for lower triangular solve; 'upper' for upper triangular solve. startRow: Identifier name or integer constant which is the least (zero-based) row index of the sparse matrix over which to iterate. For iterating over the whole sparse matrix, this should be '0'. endRowPlusOne: Identifier name or integer constant which is the largest (zero-based) row index of the sparse matrix over which to iterate, plus one. Adding one means that startRow, endRowPlusOne makes an exclusive index range. For iterating over the whole sparse matrix, this should be 'numRows'. Lower triangular solve loops over rows in forward order; upper triangular solve in reverse order. This function generates what goes inside the parentheses for the 'for' loop over rows. The generated code assumes that 'Ordinal' is an integer type suitable for iterating over those rows, and that it can use 'r' as the loop index.
625941b729b78933be1e54f4
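A usage sketch for the code generator above; the returned strings are meant to be dropped straight into a C/C++ 'for' header:

    lower = makeCsrTriSolveRowLoopBounds('lower', '0', 'numRows')
    upper = makeCsrTriSolveRowLoopBounds('upper', '0', 'numRows')
    print('for (' + lower + ') { /* lower-triangular solve body */ }')
    print('for (' + upper + ') { /* upper-triangular solve body */ }')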
def build_payload(url, token, params, **kwargs): <NEW_LINE> <INDENT> headers = { 'Accept-Encoding': 'gzip,deflate', 'content-type': 'text/plain'} <NEW_LINE> transformed_params = rb.build_param_map(params, token) <NEW_LINE> log.debug("transformed_params") <NEW_LINE> log.debug(transformed_params) <NEW_LINE> response = requests.get(url, params = transformed_params, headers=headers) <NEW_LINE> log.debug("Response Text:") <NEW_LINE> log.debug(response.text) <NEW_LINE> response.raise_for_status() <NEW_LINE> parsed_response = json.loads(response.text) <NEW_LINE> return parsed_response
Build the response payload for a request, passing required and optional parameters. External parameters are sent for mapping and validation; the JSON-like response string is parsed and a JSON object is returned. :type url: string :param url: request url :type token: string :param token: ARAPORT API token (internal parameter) :type params: string :param params: request parameters :type kwargs: string :param kwargs: optional request parameters :rtype: response json :return: Returns the actual response payload from the web service in JSON format on success; raises an exception otherwise
625941b71f5feb6acb0c4990
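A hedged usage sketch; the endpoint URL, token, and parameter names are placeholders, and the module-level rb request builder is assumed to be imported elsewhere in the source module:

    payload = build_payload(
        'https://api.araport.org/example/service',  # hypothetical endpoint
        token='MY_API_TOKEN',                       # placeholder token
        params={'locus': 'AT4G09000'})              # hypothetical parameter
    print(payload)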
def startStream(self, msg, request): <NEW_LINE> <INDENT> connection = self.channel_set.connection_manager.getConnection(msg.headers.get(msg.FLEX_CLIENT_ID_HEADER)) <NEW_LINE> if self.channel_set.notify_connections is True: <NEW_LINE> <INDENT> poller = None <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> poller = task.LoopingCall(None) <NEW_LINE> <DEDENT> _connectionLost = request.connectionLost <NEW_LINE> def _connection_lost(reason): <NEW_LINE> <INDENT> self.channel_set.disconnect(connection) <NEW_LINE> connection.unSetNotifyFunc() <NEW_LINE> _connectionLost(reason) <NEW_LINE> <DEDENT> request.connectionLost = _connection_lost <NEW_LINE> def _notify(): <NEW_LINE> <INDENT> if connection.connected is False: <NEW_LINE> <INDENT> if poller is not None: <NEW_LINE> <INDENT> poller.stop() <NEW_LINE> <DEDENT> connection.unSetNotifyFunc() <NEW_LINE> msg = messaging.StreamingMessage.getDisconnectMsg() <NEW_LINE> request.write(messaging.StreamingMessage.prepareMsg(msg, self.endpoint)) <NEW_LINE> request.finish() <NEW_LINE> return <NEW_LINE> <DEDENT> msgs = self.channel_set.subscription_manager.pollConnection(connection) <NEW_LINE> for msg in msgs: <NEW_LINE> <INDENT> request.write(messaging.StreamingMessage.prepareMsg(msg, self.endpoint)) <NEW_LINE> <DEDENT> <DEDENT> connection.setNotifyFunc(_notify) <NEW_LINE> if poller is not None: <NEW_LINE> <INDENT> poller.f = _notify <NEW_LINE> poller.start(float(self.poll_interval) / 1000, False) <NEW_LINE> <DEDENT> response = msg.acknowledge() <NEW_LINE> response.body = connection.id <NEW_LINE> self.sendMsg(request, response) <NEW_LINE> request.write(chr(messaging.StreamingMessage.NULL_BYTE) * self.KICKSTART_BYTES) <NEW_LINE> self.startBeat(connection, request)
Get this stream rolling!
625941b7460517430c393fca
def category_exists(url, cursor): <NEW_LINE> <INDENT> result = cursor.execute('SELECT url, "text" FROM CATEGORY WHERE url=?', (url,)) <NEW_LINE> return result.fetchone() is not None
Check whether a category with the given URL already exists in the database. :param url: The URL identifying the category to look up. :type url: String :param cursor: Open database cursor used to run the query. :return: True if a category with that URL exists, False otherwise.
625941b794891a1f4081b8e3
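A minimal runnable sketch with sqlite3, assuming a CATEGORY(url, "text") table as implied by the query:

    import sqlite3

    conn = sqlite3.connect(':memory:')
    cur = conn.cursor()
    cur.execute('CREATE TABLE CATEGORY (url TEXT, "text" TEXT)')
    cur.execute('INSERT INTO CATEGORY VALUES (?, ?)', ('http://example.com/news', 'News'))
    print(category_exists('http://example.com/news', cur))   # True
    print(category_exists('http://example.com/other', cur))  # False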
def setup(self, artifact_root, testnum): <NEW_LINE> <INDENT> super(FioJobTest, self).setup(artifact_root, testnum) <NEW_LINE> self.command_file = os.path.join( self.test_dir, "{0}.command".format(os.path.basename(self.fio_job))) <NEW_LINE> self.stdout_file = os.path.join( self.test_dir, "{0}.stdout".format(os.path.basename(self.fio_job))) <NEW_LINE> self.stderr_file = os.path.join( self.test_dir, "{0}.stderr".format(os.path.basename(self.fio_job))) <NEW_LINE> self.exitcode_file = os.path.join( self.test_dir, "{0}.exitcode".format(os.path.basename(self.fio_job)))
Set up instance variables for the fio job test.
625941b7dc8b845886cb5370
def resolveLinks(self, _act, framer, frame, **kwa): <NEW_LINE> <INDENT> parms = {} <NEW_LINE> parms.update(super(ClonerFramer,self).resolveLinks(_act, **kwa)) <NEW_LINE> parms['framer'] = framing.resolveFramer(framer, who=self.name) <NEW_LINE> parms['frame'] = framing.resolveFrame(frame, who=self.name) <NEW_LINE> return parms
Resolves the framer and frame links that are passed in as parms. The resolved links are passed back to the act to store in its parms, since the framer may not be the current framer at build time.
625941b77047854f462a1248
def suite(): <NEW_LINE> <INDENT> utilsTests.init() <NEW_LINE> suites = [] <NEW_LINE> suites += unittest.makeSuite(testCoordinateTransformations) <NEW_LINE> return unittest.TestSuite(suites)
Returns a suite containing all the test cases in this module.
625941b7379a373c97cfa986
def __str__(self): <NEW_LINE> <INDENT> return '[Imagine emission model]'
Return a string representation of the emission model.
625941b77047854f462a1249
@app.route('/test_template', methods=['POST']) <NEW_LINE> @login_required <NEW_LINE> def test_template(): <NEW_LINE> <INDENT> picture_file = request.files['picture'] <NEW_LINE> if picture_file and allowed_file(picture_file.filename): <NEW_LINE> <INDENT> picture = Image.open(picture_file) <NEW_LINE> w, h = picture.size <NEW_LINE> if w != h: <NEW_LINE> <INDENT> return jsonify(error='The picture must have a square shape'), 400 <NEW_LINE> <DEDENT> photobooth = get_photobooth() <NEW_LINE> photobooth.render_print_and_upload(picture_file.getvalue()) <NEW_LINE> return jsonify(message='Ticket successfully printed') <NEW_LINE> <DEDENT> return jsonify(error='Invalid or missing picture file'), 400
Print a ticket with the picture uploaded by the user
625941b7097d151d1a222c97
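A hedged test sketch using Flask's test client. How the client authenticates past @login_required depends on the real app's login setup and is elided here, and png_bytes is a placeholder for valid square-image bytes:

    import io

    with app.test_client() as client:
        # ... authenticate the client as required by @login_required ...
        resp = client.post('/test_template',
                           data={'picture': (io.BytesIO(png_bytes), 'pic.png')},
                           content_type='multipart/form-data')
        print(resp.status_code, resp.get_json())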
def check_symb_frac(self, txt, f=0.35): <NEW_LINE> <INDENT> return np.sum([not ch.isalnum() for ch in txt])/(len(txt)+0.0) <= f
T/F return: True iff the fraction of symbol/special characters in txt is less than or equal to f (default=0.35).
625941b7627d3e7fe0d68c8a
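A quick sketch exercising the check above; the method never touches self, so for illustration it can be called directly:

    # 'hello world': 1 non-alphanumeric char out of 11 -> 1/11 <= 0.35 -> True
    print(check_symb_frac(None, 'hello world'))
    # '@@@@!!': 6 of 6 chars are symbols -> 1.0 > 0.35 -> False
    print(check_symb_frac(None, '@@@@!!'))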
def print_pipes(self): <NEW_LINE> <INDENT> for pipeobj in self.pipes: <NEW_LINE> <INDENT> print(pipeobj) <NEW_LINE> <DEDENT> return
dump pipe objects
625941b78da39b475bd64db2