code (string, lengths 4–4.48k) | docstring (string, lengths 1–6.45k) | _id (string, length 24) |
---|---|---|
def get_service(self, bundle, registration): <NEW_LINE> <INDENT> os.environ['factory.get'] = "OK" <NEW_LINE> return RegistrationKeeper(self.reg, registration) | Provides a new service | 625941baa79ad161976cbfed |
def takeReading(self): <NEW_LINE> <INDENT> if (self.node.serLinkActive == 1): <NEW_LINE> <INDENT> cmdMsg = self.node.nodeMsgID + "1" <NEW_LINE> sensResp = self.node.hub.ser.sendCmdNReadResp(cmdMsg) <NEW_LINE> resultList = sensResp.split(";") <NEW_LINE> command = int(resultList[0]) <NEW_LINE> RFIDid = resultList[1] <NEW_LINE> state = int(resultList[2]) <NEW_LINE> if (RFIDid == self.instID and command == 1): <NEW_LINE> <INDENT> currDate = datetime.today() <NEW_LINE> numCurrDate = conDateTimeToNum(currDate) <NEW_LINE> self.data.append([numCurrDate,state]) <NEW_LINE> self.storeReading(numCurrDate,state) <NEW_LINE> self.verifyThresholds() | Take a reading from the RFID sensor that is connected to the Arduino microcontroller
Returns:
- none | 625941bad7e4931a7ee9ddc4 |
def test_unicode_with_active_language(self): <NEW_LINE> <INDENT> def mock_get_translation(text, hint, language_code): <NEW_LINE> <INDENT> if language_code == "de": <NEW_LINE> <INDENT> return {"singular": "translated", "o": "translated"} <NEW_LINE> <DEDENT> return {"singular": self.text} <NEW_LINE> <DEDENT> translation.activate("de") <NEW_LINE> with sleuth.switch( "fluent.fields.trans.TRANSLATION_CACHE.get_translation", mock_get_translation ): <NEW_LINE> <INDENT> obj = TranslatableContent(text=u'\xc5ukasz') <NEW_LINE> result = unicode(obj) <NEW_LINE> <DEDENT> self.assertEqual(result, u'translated') <NEW_LINE> self.assertIsInstance(result, unicode) | If there's a currently-active language, unicode should return the translated text. | 625941ba7c178a314d6ef302 |
def __init__(self, cells, quadrilateral, N = 100, fiducial_distance = 0, list_of_segments = []): <NEW_LINE> <INDENT> self.cells = cells <NEW_LINE> self.perimeter = quadrilateral <NEW_LINE> triangle1, triangle2 = splitquadrilateral(quadrilateral.points) <NEW_LINE> self.triangle1 = triangle1 <NEW_LINE> self.triangle2 = triangle2 <NEW_LINE> self.N = N <NEW_LINE> self.fiducial_distance = fiducial_distance <NEW_LINE> self.list_of_segments = list_of_segments <NEW_LINE> self.__prepare_fiducial_cut__() <NEW_LINE> self.points = [] <NEW_LINE> self.cell_list = [] <NEW_LINE> self.__cells_within_perimeter__() <NEW_LINE> self.__generate_filtered__() <NEW_LINE> self.__determine_bbox__() <NEW_LINE> self.__area__() | Give the list of other cells and a rectangular perimeter that is guaranteed to cover this
cell, and is covered ENTIRELY by the other cells of the Mesh. Quadrilateral is an np array of four points. A point is a list of two coordinates: v and w.
Spurious points may very rarely arise close to the perimeter, due to the way the fiducial volume within the perimeter is cut off from the rest of the mesh.
It is not worth coding around this at the moment. It is recommended to do a visual inspection to see where and how this happens. If a fiducial
distance > 0 is defined, all points within that distance from the perimeter will be labeled as not in the bin. It is possible to exclude sides from
this cut, which is necessary when the perimeter directly bounds the reset bin. list_of_segments is a list of indices that indicates which side of the
quadrilateral should be left out of this cut. The side defined by quadrilateral[0] and quadrilateral[1] is indexed by 0, etc. | 625941ba7cff6e4e8111782e |
def main( manifest_file, product, component, version, changelist, p4_location, timestamp, submit ): <NEW_LINE> <INDENT> logging.info('----- New run: %s -----', now) <NEW_LINE> if product: <NEW_LINE> <INDENT> if version: <NEW_LINE> <INDENT> manifest_file_to_submit = updateProductDetailsInManifest(manifest_file, 'version', version) <NEW_LINE> <DEDENT> if changelist: <NEW_LINE> <INDENT> manifest_file_to_submit = updateProductDetailsInManifest(manifest_file, 'changelist', changelist) <NEW_LINE> <DEDENT> if timestamp: <NEW_LINE> <INDENT> ts_value = computeTimestamp() <NEW_LINE> manifest_file_to_submit = updateProductDetailsInManifest(manifest_file, 'timestamp', ts_value) <NEW_LINE> <DEDENT> <DEDENT> if component: <NEW_LINE> <INDENT> if version: <NEW_LINE> <INDENT> manifest_file_to_submit = updateComponentDetailsInManifest(manifest_file, component, 'version', version) <NEW_LINE> <DEDENT> if changelist: <NEW_LINE> <INDENT> manifest_file_to_submit = updateComponentDetailsInManifest(manifest_file, component, 'changelist', changelist) <NEW_LINE> <DEDENT> if p4_location: <NEW_LINE> <INDENT> manifest_file_to_submit = updateComponentDetailsInManifest(manifest_file, component, 'p4_location', p4_location) <NEW_LINE> <DEDENT> <DEDENT> if submit: <NEW_LINE> <INDENT> p4_init(p4_port, p4_client, p4_user, p4_password) <NEW_LINE> manifest_file_name = os.path.basename(manifest_file_to_submit) <NEW_LINE> product_name, ignore_rest = str(manifest_file_name).split("-", 1) <NEW_LINE> manifest_file_depot_path = p4_location + '/' + manifest_file_name <NEW_LINE> p4_edit_manifest(manifest_file_depot_path, component, version, changelist, p4_location) <NEW_LINE> p4_submit_change() <NEW_LINE> p4.disconnect() | The only valid/useful ways to call this script are as follows:
$python_bin update_manifest.py --manifest_file ${manifest_file} --product --version ${version}
$python_bin update_manifest.py --manifest_file ${manifest_file} --product --version ${version} --changelist ${changelist} --timestamp
$python_bin update_manifest.py --manifest_file ${manifest_file} --component ${component} --version ${version} --changelist ${changelist} --p4_location ${p4_location} --submit
However, we need to be sure we account for missing parameters. | 625941ba1f5feb6acb0c49fd |
def verify(self, data): <NEW_LINE> <INDENT> return self.gpg.verify(data) | Verify signature and return results | 625941ba4e696a04525c92f5 |
def test_build_vars(self): <NEW_LINE> <INDENT> self.failUnless(isinstance(self.interactor.interactor_shadow, ShadowInteractor)) <NEW_LINE> self.failUnless(isinstance(self.interactor.interactor_parametric, ParametricInteractor)) <NEW_LINE> self.failUnless(isinstance( self.interactor.interactor_stochastic_constant,StochasticInteractor)) <NEW_LINE> self.failUnless(isinstance( self.interactor.interactor_stochastic_gaussian,StochasticInteractor)) <NEW_LINE> self.failUnless(isinstance( self.interactor.interactor_stochastic_triangular, StochasticInteractor)) <NEW_LINE> self.failUnless(isinstance( self.interactor.interactor_stochastic_uniform, StochasticInteractor)) | Does the interactor create the right traits for the types of
interactors | 625941bae64d504609d746e9 |
def complex_intensity(probe_grid): <NEW_LINE> <INDENT> return np.square(np.abs(probe_grid)) | Intensity of the complex wave. | 625941ba004d5f362079a1df |
def check_costs(costs, eps0=1e-3, eps1=1e-9): <NEW_LINE> <INDENT> assert abs(costs[0]._A).max() > eps0 <NEW_LINE> assert abs(costs[0]._B).max() > eps0 <NEW_LINE> for i in xrange(nrep-1): <NEW_LINE> <INDENT> assert abs(costs[i]._A - costs[i+1]._A).max() < eps1 <NEW_LINE> assert abs(costs[i]._B - costs[i+1]._B).max() < eps1 <NEW_LINE> assert abs(costs[i]._C - costs[i+1]._C) < eps1 | Test if all given cost functions are equivalent.
Parameters
----------
costs : list
A list of cost functions
eps0 : float
The threshold used to test that A and B matrices are non-trivial. If the
maximum absolute value of the matrix elements of cost._A or cost._B is
below this threshold, the test fails.
eps1 : float
The allowed deviation between matrix elements of two cost functions. | 625941badc8b845886cb53dd |
def StartLogging(logger=''): <NEW_LINE> <INDENT> string_io = StringIO.StringIO() <NEW_LINE> stream_handler = StreamHandler(string_io) <NEW_LINE> GetLogger(logger).AddHandler(stream_handler) <NEW_LINE> return _LogHandle(logger, stream_handler, string_io) | To be used as:
with StartLogging() as logger:
... Do something
log = logger.GetRecordedLog()
... check the log.
:param str logger:
The logger context to be logged. | 625941ba4f88993c3716bf1c |
def test_create_instance_with_oversubscribed_ram_fail(self): <NEW_LINE> <INDENT> self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0) <NEW_LINE> self.compute.resource_tracker.update_available_resource( self.context.elevated()) <NEW_LINE> resources = self.compute.driver.get_available_resource() <NEW_LINE> total_mem_mb = resources['memory_mb'] <NEW_LINE> oversub_limit_mb = total_mem_mb * 1.5 <NEW_LINE> instance_mb = int(total_mem_mb * 1.55) <NEW_LINE> params = {"memory_mb": instance_mb, "root_gb": 128, "ephemeral_gb": 128} <NEW_LINE> instance = self._create_fake_instance(params) <NEW_LINE> filter_properties = {'limits': {'memory_mb': oversub_limit_mb}} <NEW_LINE> self.assertRaises(exception.ComputeResourcesUnavailable, self.compute.run_instance, self.context, instance=instance, filter_properties=filter_properties) | Test passing of oversubscribed ram policy from the scheduler, but
with insufficient memory. | 625941ba56ac1b37e626407e |
def run_autotest_distro_detect(test, params, env): <NEW_LINE> <INDENT> vm = env.get_vm(params["main_vm"]) <NEW_LINE> vm.verify_alive() <NEW_LINE> timeout = int(params.get("login_timeout", 360)) <NEW_LINE> session = vm.wait_for_login(timeout=timeout) <NEW_LINE> timeout = int(params.get("test_timeout", 90)) <NEW_LINE> control_path = generate_control_file(params) <NEW_LINE> outputdir = test.outputdir <NEW_LINE> utils_test.run_autotest(vm, session, control_path, timeout, outputdir, params) | Run a distro detection check on the guest
:param test: kvm test object.
:param params: Dictionary with test parameters.
:param env: Dictionary with the test environment. | 625941baeab8aa0e5d26da07 |
def onPreCleanup(self): <NEW_LINE> <INDENT> logging.debug("pre cleanup called") <NEW_LINE> try: <NEW_LINE> <INDENT> logging.debug("Tests context before cleanup: %s" % self.testsContext) <NEW_LINE> for scriptId, s in self.testsContext.items(): <NEW_LINE> <INDENT> for adapterId, a in s.items(): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> if a.ctx() is not None: <NEW_LINE> <INDENT> a.ctx().stop() <NEW_LINE> a.ctx().join() <NEW_LINE> <DEDENT> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> a.stop() <NEW_LINE> a.join() <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> logging.debug("Tests context after cleanup: %s" % self.testsContext) <NEW_LINE> self.onCleanup() | Called on program stop | 625941babe8e80087fb20af0 |
def __find_sync(self) -> bool: <NEW_LINE> <INDENT> sync = "SYNC %s" % self.remote <NEW_LINE> try: <NEW_LINE> <INDENT> msg = self.conn.readline(Race.READ_TIMEOUT) <NEW_LINE> return sync in msg <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> logging.warn("Did not receive SYNC from %s, skipping." % self.remote) <NEW_LINE> return False | Waits for a blast from the lirc process and returns true if it's
a syncing signal from the Carrera IR tower. | 625941ba9c8ee82313fbb61e |
def mergeSubPops(self, *args, **kwargs) -> "size_t": <NEW_LINE> <INDENT> return _simuPOP_ba.Population_mergeSubPops(self, *args, **kwargs) | Usage:
x.mergeSubPops(subPops=ALL_AVAIL, name="", toSubPop=-1)
Details:
Merge subpopulations subPops. If subPops is ALL_AVAIL (default),
all subpopulations will be merged. subPops do not have to be
adjacent to each other. They will all be merged to the
subpopulation with the smallest subpopulation ID, unless a
subpopulation ID is specified using parameter toSubPop. Indexes of
the rest of the subpopulation may be changed. A new name can be
assigned to the merged subpopulation through parameter name (an
empty name will be ignored). This function returns the ID of the
merged subpopulation. | 625941ba3346ee7daa2b2c12 |
def pre_rolling_restart(self, env): <NEW_LINE> <INDENT> import params <NEW_LINE> env.set_params(params) <NEW_LINE> if not params.version or compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') < 0: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> Logger.info("Executing Oozie Server Rolling Upgrade pre-restart") <NEW_LINE> oozie_server_upgrade.backup_configuration() <NEW_LINE> Execute(format("hdp-select set oozie-server {version}")) <NEW_LINE> oozie_server_upgrade.restore_configuration() <NEW_LINE> oozie_server_upgrade.prepare_libext_directory() <NEW_LINE> oozie_server_upgrade.upgrade_oozie() | Performs the tasks surrounding the Oozie startup when a rolling upgrade
is in progress. This includes backing up the configuration, updating
the database, preparing the WAR, and installing the sharelib in HDFS.
:param env:
:return: | 625941bad164cc6175782bf7 |
def make_mask(self, threshisl=5): <NEW_LINE> <INDENT> logger.info('%s: Making mask...' % self.imagename) <NEW_LINE> if not os.path.exists(self.maskname): <NEW_LINE> <INDENT> make_mask(image_name=self.imagename, mask_name=self.maskname, threshisl=threshisl, atrous_do=True) <NEW_LINE> <DEDENT> if self.user_mask is not None: <NEW_LINE> <INDENT> logger.info('%s: Adding user mask (%s)...' % (self.imagename, self.user_mask)) <NEW_LINE> blank_image_reg(self.maskname, self.user_mask, inverse=False, blankval=1) | Create a mask of the image where only believable flux is present
def __call__(self, inputs): <NEW_LINE> <INDENT> crv_in, ctrl_points = inputs[0], inputs[1] <NEW_LINE> crv_out = None <NEW_LINE> if crv_in is None: <NEW_LINE> <INDENT> if ctrl_points: <NEW_LINE> <INDENT> pts = loads(ctrl_points) <NEW_LINE> crv_out = NurbsCurve2D(pts) <NEW_LINE> <DEDENT> <DEDENT> crv = crv_in if crv_in else crv_out <NEW_LINE> if crv: <NEW_LINE> <INDENT> pts = dumps(crv.ctrlPointList) <NEW_LINE> self.set_input(1,pts) <NEW_LINE> <DEDENT> return crv, | A curve 2D | 625941baadb09d7d5db6c63b |
def __init__(self, logZ, dust=True, re_z=0.0): <NEW_LINE> <INDENT> if dust: <NEW_LINE> <INDENT> self.grains = 'no grains\ngrains ISM' <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.grains = 'no grains' <NEW_LINE> <DEDENT> self.re_z=re_z <NEW_LINE> abundSet.__init__(self, 'UVbyler', logZ) | arbitrarily vary C/O at fixed O. | 625941ba50812a4eaa59c1ce |
def testAzureAuthorizationResponseModel(self): <NEW_LINE> <INDENT> pass | Test AzureAuthorizationResponseModel | 625941bafff4ab517eb2f2e3 |
def insert(self, word: str) -> None: <NEW_LINE> <INDENT> cur = self.root <NEW_LINE> for lvl in range(len(word)): <NEW_LINE> <INDENT> idx = ord(word[lvl]) - ord('a') <NEW_LINE> if not cur.children[idx]: <NEW_LINE> <INDENT> cur.children[idx] = TrieNode() <NEW_LINE> <DEDENT> cur = cur.children[idx] <NEW_LINE> <DEDENT> cur.isEndOfWord = True | Inserts a word into the trie. | 625941ba7b180e01f3dc46ad |
def sanitize_id(x): <NEW_LINE> <INDENT> return re.sub(r'[^-\w ]', '', x.replace('.', '-'), re.U).replace(' ', '-') | Sanitize an ID similar to github_sanitize_id, but with the
following differences:
* no downcasing
* dots (.) are replaced with hyphens (which helps Python module
namespaces look better) | 625941ba3eb6a72ae02ec37d |
def write_serial(fd, bytes): <NEW_LINE> <INDENT> fd.write("".join([chr(a) for a in bytes])) | Writes the bytes in the supplied tuple to the serial port | 625941bae8904600ed9f1dd2 |
def compareScore(self, other): <NEW_LINE> <INDENT> if self.getScore() > other.getScore() and self.getScore() <= 21: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> return False | This method takes the score of the
hand and compares it to that of another.
It returns True if this hand's score is higher
and False otherwise. | 625941ba7b180e01f3dc46ae |
def odd(x): <NEW_LINE> <INDENT> if x%2 != 0: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return False | x: int or float.
returns: True if x is odd, False otherwise | 625941bac432627299f04aed |
def ffmpeg_merge_video_audio(video,audio,output, vcodec='copy', acodec='copy', ffmpeg_output=False, verbose = True): <NEW_LINE> <INDENT> cmd = [FFMPEG_BINARY, "-y", "-i", audio,"-i", video, "-vcodec", vcodec, "-acodec", acodec, output] <NEW_LINE> subprocess_call(cmd, verbose = verbose) | merges video file ``video`` and audio file ``audio`` into one
movie file ``output``. | 625941ba56ac1b37e626407f |
def create_light_spot(self, context, newlight = False, dupli_name = "Lumiere"): <NEW_LINE> <INDENT> if newlight: <NEW_LINE> <INDENT> dupli = get_object(context, self.lightname) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> dupli = create_dupli(self, context, dupli_name) <NEW_LINE> <DEDENT> bpy.ops.object.lamp_add(type='SPOT', view_align=False, location=(0,0,0)) <NEW_LINE> context.active_object.data.name = "LAMP_" + dupli.data.name <NEW_LINE> lamp = context.object <NEW_LINE> lamp.name = "LAMP_" + dupli.data.name <NEW_LINE> lamp.data.cycles.use_multiple_importance_sampling = True <NEW_LINE> lamp.Lumiere.typlight = "Spot" <NEW_LINE> lamp.Lumiere.lightname = context.active_object.data.name <NEW_LINE> bpy.ops.object.constraint_add(type='COPY_TRANSFORMS') <NEW_LINE> lamp.constraints["Copy Transforms"].target = bpy.data.objects[dupli.name] <NEW_LINE> context.scene.objects.active = bpy.data.objects[dupli.name] <NEW_LINE> lamp.parent = dupli <NEW_LINE> if not newlight: <NEW_LINE> <INDENT> create_lamp_nodes(self, context, lamp) <NEW_LINE> <DEDENT> return(dupli) | Create a blender light spot | 625941ba71ff763f4b549538 |
def set_from_obj(self, instance, obj): <NEW_LINE> <INDENT> value = { 'app_config_id': obj.repo.app_config_id, 'rev': obj.commit.object_id, 'path': obj.path } <NEW_LINE> super(RepoCommitPathSpec, self).__set__(instance, value) | set from a file or folder obj | 625941ba8e7ae83300e4ae75 |
def transform_data(self, qs): <NEW_LINE> <INDENT> data = None <NEW_LINE> for row in qs: <NEW_LINE> <INDENT> row = [row[n] for n in self.select.keys()] <NEW_LINE> if data is None: <NEW_LINE> <INDENT> data = self.get_complete_dict(len(row)) <NEW_LINE> <DEDENT> for i in range(1, len(row)): <NEW_LINE> <INDENT> data[row[0]][i] += row[i] <NEW_LINE> <DEDENT> <DEDENT> return [[key] + value for key, value in data.iteritems()] | * groups and converts data of a final queryset
* expects a Date()-object in the first column -> x-axis
* all other columns are handled as data lines -> [y-axis 1, 2, ...] | 625941ba460517430c394036 |
def unset_string(self, option, s=None): <NEW_LINE> <INDENT> self('unset %s' % (option,)) | Set a string option, or if s is omitted, unset the option. | 625941bab57a9660fec3372a |
def __eq__(self, other): <NEW_LINE> <INDENT> if not isinstance(other, MessageListResponse): <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> return self.to_dict() == other.to_dict() | Returns true if both objects are equal | 625941ba8a43f66fc4b53f12 |
def test_create_profile(self): <NEW_LINE> <INDENT> payload = { "first_name": fake.name().split(' ')[0], "last_name": fake.name().split(' ')[1], "phone": f'{phn()}', } <NEW_LINE> res = self.client.post(PROFILE_URL, payload) <NEW_LINE> self.assertEqual(res.status_code, status.HTTP_201_CREATED) | Test creating profile | 625941ba2eb69b55b151c754 |
def stripCase(obj,attr = "all"): <NEW_LINE> <INDENT> caseLess = [] <NEW_LINE> if attr: <NEW_LINE> <INDENT> material = getattr(obj, attr, None) <NEW_LINE> if material is None: <NEW_LINE> <INDENT> material = getattr(obj.tags, attr, None) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> material = obj <NEW_LINE> <DEDENT> for o in material: <NEW_LINE> <INDENT> oLess = o <NEW_LINE> try: <NEW_LINE> <INDENT> oLess = o.casefold() <NEW_LINE> <DEDENT> except AttributeError: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> caseLess.append(oLess) <NEW_LINE> <DEDENT> return caseLess | returns a list of caseless equivalents for all items in obj.attr
obj.attr must be iterable
if attr is specified as None, function iterates through obj itself
if an item cannot be casefolded, function will append original version of the item | 625941ba23e79379d52ee411 |
def draw(self): <NEW_LINE> <INDENT> self.current_state.draw() <NEW_LINE> fps_string = 'FPS: {0:.2f}'.format(self.fps_clock.get_fps()) <NEW_LINE> self.graphics.draw_string((1200, 2), fps_string, WHITE) | Render. | 625941baaad79263cf3908e5 |
def emorse(text): <NEW_LINE> <INDENT> text = map(str, ' '.join(text).upper()) <NEW_LINE> inverted = {} <NEW_LINE> for key in MORSECODES: <NEW_LINE> <INDENT> inverted[MORSECODES[key]] = key <NEW_LINE> <DEDENT> return reducer(text, inverted, ' ') | Encodes a text in morse code | 625941bad6c5a10208143ef1 |
def _query_range_get(self): <NEW_LINE> <INDENT> return (self.query_start, self.query_end) | Return the start and end of a query (PRIVATE). | 625941ba167d2b6e31218a40 |
def _parseline(line): <NEW_LINE> <INDENT> pts=[] <NEW_LINE> for c in '=!': <NEW_LINE> <INDENT> if line.find(c) != -1: <NEW_LINE> <INDENT> pts.append(line.find(c)) <NEW_LINE> <DEDENT> <DEDENT> if pts: <NEW_LINE> <INDENT> line = line[:min(pts)] <NEW_LINE> <DEDENT> line = line.translate(str.maketrans('', '', '\'\"')) <NEW_LINE> line = line.strip() <NEW_LINE> return line | Process a line from the DDSCAT file.
:param line: The input string to process
:returns: A string with extraneous characters removed
Ignores any characters after a '=' or '!'
Removes quote characters | 625941ba2ae34c7f2600cfdb |
def thumbnail(self, alias: Optional[str] = None) -> dict: <NEW_LINE> <INDENT> return { 'url': self._adapter.url(self._file, alias), 'alt': self._adapter.alt(self._file), **self._additional, } | Basic thumbnail generation method, also used for backwards compatibility. Uses
the image instance and the provided thumbnail alias to return a dict
with a url and an alt text. | 625941ba91af0d3eaac9b8be |
def get_data_loaders(batch_size: int, model): <NEW_LINE> <INDENT> col_fn = collate_fn <NEW_LINE> if model == SwaVModel: <NEW_LINE> <INDENT> col_fn = swav_collate_fn <NEW_LINE> <DEDENT> elif model == DINOModel: <NEW_LINE> <INDENT> col_fn = dino_collate_fn <NEW_LINE> <DEDENT> dataloader_train_ssl = torch.utils.data.DataLoader( dataset_train_ssl, batch_size=batch_size, shuffle=True, collate_fn=col_fn, drop_last=True, num_workers=num_workers ) <NEW_LINE> dataloader_train_kNN = torch.utils.data.DataLoader( dataset_train_kNN, batch_size=batch_size, shuffle=False, drop_last=False, num_workers=num_workers ) <NEW_LINE> dataloader_test = torch.utils.data.DataLoader( dataset_test, batch_size=batch_size, shuffle=False, drop_last=False, num_workers=num_workers ) <NEW_LINE> return dataloader_train_ssl, dataloader_train_kNN, dataloader_test | Helper method to create dataloaders for ssl, kNN train and kNN test
Args:
batch_size: Desired batch size for all dataloaders | 625941ba50485f2cf553cc42 |
def select_map(self): <NEW_LINE> <INDENT> cartes = self.maze.get_map_list() <NEW_LINE> msg_o_client = "Cartes disponibles sur le serveur : \n" <NEW_LINE> for i, carte in enumerate(cartes): <NEW_LINE> <INDENT> msg_o_client += "{} - {}\n".format(i + 1, carte[0]) <NEW_LINE> <DEDENT> msg_o_client += "Entrez le numéro de la carte : " <NEW_LINE> return msg_o_client | Factored-out code | 625941ba1d351010ab8559c6 |
def set_refine(self, refine): <NEW_LINE> <INDENT> self.query_view.version_result_view.controller.set_refine(refine) | set_refine(refine: bool) -> None
Set the refine state to True or False | 625941bab7558d58953c4dc4 |
@cli.command() <NEW_LINE> @click.option('--name', default=None, help='Name of the PaaS instance, will be generated if not ' 'provided.') <NEW_LINE> @option('--size', default='s', type=click.Choice(['s', 'm', 'x', 'xl', 'xxl']), help='Size of the PaaS instance.') <NEW_LINE> @option('--type', default='pythonpgsql', type=PAAS_TYPE, help='Type of the PaaS instance.') <NEW_LINE> @option('--quantity', default=0, help='Additional disk amount (in GB).') <NEW_LINE> @option('--duration', default='1m', help='Number of month, suffixed with m.') <NEW_LINE> @option('--datacenter', type=DATACENTER, default='LU', help='Datacenter where the PaaS will be spawned.') <NEW_LINE> @click.option('--vhosts', default=None, multiple=True, help='List of virtual hosts to be linked to the instance.') <NEW_LINE> @click.option('--password', prompt=True, hide_input=True, confirmation_prompt=True, required=True, help='Password of the PaaS instance.') <NEW_LINE> @click.option('--snapshotprofile', default=None, help='Set a snapshot profile associated to this paas disk.') <NEW_LINE> @click.option('--bg', '--background', default=False, is_flag=True, help='Run command in background mode (default=False).') <NEW_LINE> @option('--sshkey', multiple=True, help='Authorize ssh authentication for the given ssh key.') <NEW_LINE> @pass_gandi <NEW_LINE> def create(gandi, name, size, type, quantity, duration, datacenter, vhosts, password, snapshotprofile, background, sshkey): <NEW_LINE> <INDENT> if not name: <NEW_LINE> <INDENT> name = randomstring() <NEW_LINE> <DEDENT> result = gandi.paas.create(name, size, type, quantity, duration, datacenter, vhosts, password, snapshotprofile, background, sshkey) <NEW_LINE> return result | Create a new PaaS instance and initialize associated git repository.
you can specify a configuration entry named 'sshkey' containing
path to your sshkey file
$ gandi config -g sshkey ~/.ssh/id_rsa.pub
or getting the sshkey "my_key" from your gandi ssh keyring
$ gandi config -g sshkey my_key
to know which PaaS instance type to use as type
$ gandi types | 625941ba76d4e153a657e9da |
def streaming_bulk(self, actions, chunk_size=500, max_chunk_bytes=100 * 1024 * 1024, raise_on_error=True, expand_action_callback=ActionParser.expand_action, raise_on_exception=True, **kwargs): <NEW_LINE> <INDENT> actions = list(map(expand_action_callback, actions)) <NEW_LINE> for bulk_actions in self._chunk_actions(actions, chunk_size, max_chunk_bytes): <NEW_LINE> <INDENT> yield self._process_bulk_chunk(bulk_actions, raise_on_exception, raise_on_error, **kwargs) | Streaming bulk consumes actions from the iterable passed in and yields the results of each bulk chunk. For non-streaming use cases, see
:func:`~elasticsearch.helpers.bulk`, which is a wrapper around streaming
bulk that returns summary information about the bulk operation once the
entire input is consumed and sent.
:arg actions: iterable containing the actions to be executed
:arg chunk_size: number of docs in one chunk sent to es (default: 500)
:arg max_chunk_bytes: the maximum size of the request in bytes (default: 100MB)
:arg raise_on_error: raise ``BulkIndexError`` containing errors (as `.errors`)
from the execution of the last chunk when some occur. By default we raise.
:arg raise_on_exception: if ``False`` then don't propagate exceptions from
call to ``bulk`` and just report the items that failed as failed.
:arg expand_action_callback: callback executed on each action passed in,
should return a tuple containing the action line and the data line
(`None` if data line should be omitted). | 625941ba7d43ff24873a2b46 |
def find_new(flag): <NEW_LINE> <INDENT> if flag == -1: <NEW_LINE> <INDENT> sql = "SELECT * FROM record WHERE interval = -1" <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> sql = "SELECT * FROM record WHERE interval = -1 AND recordId LIKE '%" + flag + "'" <NEW_LINE> <DEDENT> return DBFun.select('db_pymemo.db', sql) | Find new records and return them
:return: | 625941bacb5e8a47e48b7958 |
def get(self, request, **kwargs): <NEW_LINE> <INDENT> meetup = Meeting.objects.filter(id=self.kwargs['meetup_id']) <NEW_LINE> question = Question.objects.filter(id=self.kwargs['question_id']) <NEW_LINE> if meetup: <NEW_LINE> <INDENT> if not question: <NEW_LINE> <INDENT> return Response( { "status": status.HTTP_404_NOT_FOUND, "error": "Question not found." }, status=status.HTTP_404_NOT_FOUND ) <NEW_LINE> <DEDENT> queryset = Comment.objects.filter(question=self.kwargs['question_id']) <NEW_LINE> serializer = CommentSerializer(queryset, many=True) <NEW_LINE> data = [] <NEW_LINE> for comment in serializer.data: <NEW_LINE> <INDENT> user = User.objects.filter(Q(username=comment["created_by"])).distinct().first() <NEW_LINE> comment["created_by_id"] = user.id <NEW_LINE> comment["question_name"] = question.first().title <NEW_LINE> data.append(comment) <NEW_LINE> <DEDENT> return Response( { "status": status.HTTP_200_OK, "comments": data } ) <NEW_LINE> <DEDENT> return Response( { "status": status.HTTP_404_NOT_FOUND, "error": "Meetup not found." }, status=status.HTTP_404_NOT_FOUND ) | Return a list of comments. | 625941bad99f1b3c44c67440 |
def add_file_raw(self, name, data, cbfs_offset=None, compress=COMPRESS_NONE): <NEW_LINE> <INDENT> cfile = CbfsFile.raw(name, data, cbfs_offset, compress) <NEW_LINE> self._files[name] = cfile <NEW_LINE> return cfile | Create a new raw file
Args:
name: String file name to put in CBFS (does not need to correspond
to the name that the file originally came from)
data: Contents of file
cbfs_offset: Offset of this file's data within the CBFS, in bytes,
or None to place this file anywhere
compress: Compression algorithm to use (COMPRESS_...)
Returns:
CbfsFile object created | 625941ba507cdc57c6306b7e |
def fit(self,X=None,ngrids=100,REML=True): <NEW_LINE> <INDENT> if X == None: X = self.X0t <NEW_LINE> else: <NEW_LINE> <INDENT> self.X0t_stack[:,(self.q)] = matrixMult(self.Kve.T,X)[:,0] <NEW_LINE> X = self.X0t_stack <NEW_LINE> <DEDENT> H = np.array(range(ngrids)) / float(ngrids) <NEW_LINE> L = np.array([self.LL(h,X,stack=False,REML=REML)[0] for h in H]) <NEW_LINE> self.LLs = L <NEW_LINE> hmax = self.getMax(H,X,REML) <NEW_LINE> L,beta,sigma,betaSTDERR = self.LL(hmax,X,stack=False,REML=REML) <NEW_LINE> self.H = H <NEW_LINE> self.optH = hmax.sum() <NEW_LINE> self.optLL = L <NEW_LINE> self.optBeta = beta <NEW_LINE> self.optSigma = sigma.sum() <NEW_LINE> return hmax,beta,sigma,L | Finds the maximum-likelihood solution for the heritability (h) given the current parameters.
X can be passed and will be transformed and concatenated to X0t. Otherwise, X0t is used as
the covariate matrix.
This function calculates the LLs over a grid and then uses .getMax(...) to find the optimum.
Given this optimum, the function computes the LL and associated ML solutions. | 625941ba07f4c71912b11331 |
def sync_start(self): <NEW_LINE> <INDENT> sync_start_cmd = ['sync start'] <NEW_LINE> self._run_openstack_cmds(sync_start_cmd) | Sends indication to EOS that ML2->EOS sync has started. | 625941baec188e330fd5a64f |
def delete_attribute(self, dn, attr, value): <NEW_LINE> <INDENT> m = ldb.Message() <NEW_LINE> m.dn = ldb.Dn(self.test_ldb_dc, dn) <NEW_LINE> m[attr] = ldb.MessageElement(value, ldb.FLAG_MOD_DELETE, attr) <NEW_LINE> self.test_ldb_dc.modify(m) | Deletes an attribute from an object | 625941ba57b8e32f5248334a |
def test_update_wait_list(self): <NEW_LINE> <INDENT> pass | Test case for update_wait_list
# noqa: E501 | 625941ba55399d3f0558855d |
def _handleSaveSchema(self): <NEW_LINE> <INDENT> schema_name = self.new_schema_name_value.text() <NEW_LINE> schema_driver = self.new_schema_driver_value.currentText() <NEW_LINE> schema_host = self.new_schema_host_value.text() <NEW_LINE> schema_port = self.new_schema_port_value.text() <NEW_LINE> if schema_driver == "oracle": <NEW_LINE> <INDENT> schema_service_name = self.new_schema_servicename_value.text() <NEW_LINE> datasource_other = schema_service_name <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> schema_user = self.new_schema_user_value.text() <NEW_LINE> schema_pass = self.new_schema_pass_value.text() <NEW_LINE> datasource_other = "{0}&{1}".format(schema_user,schema_pass) <NEW_LINE> <DEDENT> if schema_name == "" or schema_host == "" or schema_port == "" or datasource_other == "": <NEW_LINE> <INDENT> _result = "失败" <NEW_LINE> _detail = "信息不完整" <NEW_LINE> self.add_schema_result.setStyleSheet("color:red;") <NEW_LINE> self.add_schema_result.setText(_result) <NEW_LINE> self.add_schema_result_detail.setText(_detail) <NEW_LINE> return <NEW_LINE> <DEDENT> if not (IsIp(schema_host) or IsDomain(schema_host)): <NEW_LINE> <INDENT> _result = "失败" <NEW_LINE> _detail = "主机信息错误,不是IP或者域名" <NEW_LINE> self.add_schema_result.setStyleSheet("color:red;") <NEW_LINE> self.add_schema_result.setText(_result) <NEW_LINE> self.add_schema_result_detail.setText(_detail) <NEW_LINE> return <NEW_LINE> <DEDENT> sr = insertSchemaRecord(self.mysqlite, schema_name, schema_driver,schema_host, schema_port, datasource_other) <NEW_LINE> if sr is None: <NEW_LINE> <INDENT> schema_id = getDataSourceCurrentSeq(self.mysqlite) <NEW_LINE> if schema_driver == "oracle": <NEW_LINE> <INDENT> child1 = QTreeWidgetItem(self.oracle_tree) <NEW_LINE> <DEDENT> elif schema_driver == "mysql": <NEW_LINE> <INDENT> child1 = QTreeWidgetItem(self.mysql_tree) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> child1 = QTreeWidgetItem(self.schema_tree) <NEW_LINE> <DEDENT> child1.setText(0, schema_name) <NEW_LINE> child1.setText(1, str(schema_id)) <NEW_LINE> self.left_tree.setCurrentItem(child1) <NEW_LINE> _result = "成功" <NEW_LINE> _detail = "" <NEW_LINE> self.add_schema_result.setStyleSheet("color:green;") <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> _result = "失败" <NEW_LINE> _detail = "{0}".format(sr) <NEW_LINE> self.add_schema_result.setStyleSheet("color:red;") <NEW_LINE> <DEDENT> self.add_schema_result.setText(_result) <NEW_LINE> self.add_schema_result_detail.setText(_detail) | Save the newly added data source information to the database
:return: | 625941ba236d856c2ad44687 |
def test_filter_providers_by_name_contains(self): <NEW_LINE> <INDENT> iam_arn = 'arn:aws:s3:::my_s3_bucket' <NEW_LINE> bucket_name = 'my_s3_bucket' <NEW_LINE> create_response = self.create_provider(bucket_name, iam_arn, ) <NEW_LINE> provider_result = create_response.json() <NEW_LINE> provider_uuid = provider_result.get('uuid') <NEW_LINE> self.assertIsNotNone(provider_uuid) <NEW_LINE> url = '%s?mame=provider' % reverse('provider-list') <NEW_LINE> client = APIClient() <NEW_LINE> response = client.get(url, **self.headers) <NEW_LINE> self.assertEqual(response.status_code, status.HTTP_200_OK) <NEW_LINE> json_result = response.json() <NEW_LINE> results = json_result.get('data') <NEW_LINE> self.assertIsNotNone(results) <NEW_LINE> self.assertEqual(len(results), 1) | Test that providers that contain name appear. | 625941ba97e22403b379ce43 |
def init(mainWindow, debug=False): <NEW_LINE> <INDENT> global __mainWindow <NEW_LINE> global __debug <NEW_LINE> __mainWindow = mainWindow <NEW_LINE> __debug = debug <NEW_LINE> sys.excepthook = _excepthook | Initialize the module | 625941ba8a349b6b435e801e |
def lexsort(A, dim=1): <NEW_LINE> <INDENT> if isnumpy(A): <NEW_LINE> <INDENT> i = numpy.lexsort(A, axis=dim) <NEW_LINE> return A[i], i <NEW_LINE> <DEDENT> if istorch(A): <NEW_LINE> <INDENT> out = A.clone() <NEW_LINE> if dim == 0: <NEW_LINE> <INDENT> out = torch.t(out) <NEW_LINE> <DEDENT> off = indices(out.shape[1]-1, 0, dtype=A.dtype, device=A.device).squeeze()*out.shape[0]+1 <NEW_LINE> i = torch.argsort(torch.sum(out*off, dim=1)) <NEW_LINE> if dim == 0: <NEW_LINE> <INDENT> out = A[:, i] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> out = A[i] <NEW_LINE> <DEDENT> return out, i <NEW_LINE> <DEDENT> raise RuntimeError('Unknown data type') | Lexicographic sort of elements in a given Tensor
Parameters
----------
A : Tensor
a two dimensional Tensor
dim : int (optional)
the sorting dimension (default is 1)
Returns
-------
(Tensor,LongTensor)
The sorted Tensor and the rows/cols indices in the original input | 625941ba31939e2706e4cd19 |
def grad_origin(self,x): <NEW_LINE> <INDENT> return ((x - self.origin.value) * np.exp(-(x - self.origin.value)**2/(2 * self.sigma.value**2)) * self.A.value) / (sqrt2pi * self.sigma.value**3) | Given a one-dimensional array x containing the energies at which
you want to evaluate the gradient of the background model,
returns the gradient of parameter origin for the current value of
the parameters. | 625941ba24f1403a92600a13 |
def get_epiweek2date_expr() -> Callable: <NEW_LINE> <INDENT> return ibis.postgres.udf.existing_udf( 'epiweek2date', input_types=['int64'], output_type='date' ) | Return a UDF expression for epiweek2date
Returns
-------
Callable | 625941ba21bff66bcd6847ff |
def __init__(self, num_tokens_as_food_card=TRAIT_CARD_DEFAULT_FOOD_TOKENS): <NEW_LINE> <INDENT> description = "Scavenger automatically eats one food token every time a Carnivore eats another species." <NEW_LINE> super().__init__(num_tokens_as_food_card, description) | :param num_tokens_as_food_card: The food tokens associated with this ScavengerCard
:type num_tokens_as_food_card: int
:return None | 625941ba656771135c3eb71c |
def _pprint_dict(self, value, level=0): <NEW_LINE> <INDENT> if not value: <NEW_LINE> <INDENT> self._print_level('{},', 0) <NEW_LINE> return <NEW_LINE> <DEDENT> self._print_level('{', 0) <NEW_LINE> for k in sorted(value.keys()): <NEW_LINE> <INDENT> self._print_level("'%s':" % k, level, False) <NEW_LINE> self._process_value(value[k], level) <NEW_LINE> <DEDENT> self._print_level('},', level - self._indent) | Pretty print a dictionary
:type value: :class:`dict`
:param value: Value to be processed
:type level: :class:`int`
:param level: Indentation level | 625941baa8370b771705274b |
def parse_game_spec(game_spec): <NEW_LINE> <INDENT> pieces = game_spec.split() <NEW_LINE> return int(pieces[0]), int(pieces[-2]) | Parse the string specifying the number of players and max value of marble to play to.
Return a tuple (N players, Max marble value) | 625941ba6fece00bbac2d5e6 |
def countPlayers(self, com_id): <NEW_LINE> <INDENT> result = len(self.getDb().bets.distinct("user_id", {"com_id":com_id})) <NEW_LINE> return result | Count of the number of distinct users in a community
:param com_id: the community id
:return: the number of users who have bet | 625941ba8c0ade5d55d3e869 |
def test_single_sentence_no_punctuation_easy(self): <NEW_LINE> <INDENT> test_sentence = "Hello here is sentence." <NEW_LINE> expected_lexicon = {'NOUN': ['hello', 'sentence'], 'ADV': ['here'], 'VERB': ['is']} <NEW_LINE> lexicon = {} <NEW_LINE> nltktest.add_paragraph_no_punctuation(test_sentence, lexicon) <NEW_LINE> self.assertEqual(expected_lexicon, lexicon) | Tests whether a single sentence achieves the expected output while stripping away punctuation. | 625941bab830903b967e97c0 |
def points(self): <NEW_LINE> <INDENT> return _digital_swig.digital_constellation_qpsk_sptr_points(self) | points(self) -> gr_complex_vector
Returns the set of points in this constellation. | 625941ba66656f66f7cbc054 |
def _single_element_action(self, g, u): <NEW_LINE> <INDENT> if not isinstance(g, tuple): <NEW_LINE> <INDENT> raise TypeError() <NEW_LINE> <DEDENT> if len(u) != 6: <NEW_LINE> <INDENT> raise ValueError() <NEW_LINE> <DEDENT> g_matrix, g_vector = g <NEW_LINE> q, omega = np.split(u, 2) <NEW_LINE> new_q = g_matrix @ q <NEW_LINE> new_omega = g_matrix @ omega + self._hat(g_vector) @ g_matrix @ q <NEW_LINE> return np.hstack((new_q, new_omega)) | [summary]
Parameters
----------
g : Two-tuple
First element 3-by-3 matrix, second element array of length 3
u : Array
Must be of length 6 | 625941ba30c21e258bdfa345 |
def insertion_sort(t): <NEW_LINE> <INDENT> n = len(t) <NEW_LINE> for i in range(1, n): <NEW_LINE> <INDENT> tcur = t[i] <NEW_LINE> j = i <NEW_LINE> while j > 0: <NEW_LINE> <INDENT> if t[j-1] <= tcur: break <NEW_LINE> t[j] = t[j-1] <NEW_LINE> j -= 1 <NEW_LINE> <DEDENT> t[j] = tcur <NEW_LINE> <DEDENT> return t | :param t: the array
:return: the sorted array
insertion sort
works in place | 625941bad53ae8145f87a120 |
def is_zero(self, *args): <NEW_LINE> <INDENT> return _vnl_matrixPython.vnl_matrixSC_is_zero(self, *args) | is_zero(self) -> bool
is_zero(self, double tol) -> bool | 625941baa79ad161976cbfef |
def __init__(self, file, line, err) : <NEW_LINE> <INDENT> if hasattr(file, "function") : <NEW_LINE> <INDENT> file = file.function.func_code.co_filename <NEW_LINE> <DEDENT> elif hasattr(file, "co_filename") : <NEW_LINE> <INDENT> file = file.co_filename <NEW_LINE> <DEDENT> elif hasattr(line, "co_filename") : <NEW_LINE> <INDENT> file = line.co_filename <NEW_LINE> <DEDENT> if file[:2] == './' : <NEW_LINE> <INDENT> file = file[2:] <NEW_LINE> <DEDENT> self.file = file <NEW_LINE> if hasattr(line, "co_firstlineno") : <NEW_LINE> <INDENT> line = line.co_firstlineno <NEW_LINE> <DEDENT> if line == None : <NEW_LINE> <INDENT> line = 1 <NEW_LINE> <DEDENT> self.line = line <NEW_LINE> self.err = err <NEW_LINE> self.level = err.level | @param file: an object from which the file where the warning
was found can be derived
@type file: L{types.CodeType}, L{function.FakeCode} or str
@param line: the line where the warning was found; if file was str,
then line will be a code object.
@type line: int or L{types.CodeType} or None
@type err: L{msgs.WarningClass} | 625941ba4c3428357757c1d5 |
def fetch_col(self,ind): <NEW_LINE> <INDENT> return [self.values[r*self.n_col + ind] for r in xrange(self.n_row)] | Get all elements in a specified column. | 625941bb1f5feb6acb0c49ff |
def url(host: str, api: str) -> str: <NEW_LINE> <INDENT> if host.endswith("/") and api.startswith("/"): <NEW_LINE> <INDENT> host_ = host[:-1] <NEW_LINE> <DEDENT> elif not host.endswith("/") and not api.startswith("/"): <NEW_LINE> <INDENT> host_ = host + "/" <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> host_ = host <NEW_LINE> <DEDENT> return host_ + api | Join host and api into a full URL
:param host: API request host
:param api: API name
:return: | 625941bafb3f5b602dac353a |
def add(self, bytestring): <NEW_LINE> <INDENT> self.num_in += 1 <NEW_LINE> value_list = self.bsl.add_line(bytestring) <NEW_LINE> self.root.weight += 1 <NEW_LINE> node = self.root <NEW_LINE> i = 0 <NEW_LINE> while i < len(value_list): <NEW_LINE> <INDENT> if i == len(value_list) - 1: <NEW_LINE> <INDENT> node.next_inc_lookahead1(value_list[i]) <NEW_LINE> i += 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> tpl = node.next_inc_lookahead2(value_list[i], value_list[i+1]) <NEW_LINE> node = tpl[0] <NEW_LINE> i += tpl[1] | adds a byte string to the Shrubbery | 625941bab5575c28eb68dea8 |
def __init__(self, source, namespaces, **kwds): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> namespaces = [dict(n) for n in namespaces] <NEW_LINE> <DEDENT> except Exception: <NEW_LINE> <INDENT> raise TypeError("namespaces must be a non-empty list of dicts.") <NEW_LINE> <DEDENT> super(Interpreter, self).__init__(source, **kwds) <NEW_LINE> self.namespaces = namespaces | Parse `source` and mixin interpreted Python objects from `namespaces`.
:type source: str
:arg source: Code to parse.
:type namespaces: list of dict
:arg namespaces: a list of namespace dictionaries such as the one
returned by :func:`locals`.
Other optional arguments are same as the ones for :class:`Script`.
If `line` and `column` are None, they are assumed be at the end of
`source`. | 625941baad47b63b2c509e33 |
def ex_list_availability_zones(self, only_available=True): <NEW_LINE> <INDENT> params = {'Action': 'DescribeAvailabilityZones'} <NEW_LINE> filters = {'region-name': self.region_name} <NEW_LINE> if only_available: <NEW_LINE> <INDENT> filters['state'] = 'available' <NEW_LINE> <DEDENT> params.update(self._build_filters(filters)) <NEW_LINE> result = self.connection.request(self.path, params=params.copy()).object <NEW_LINE> availability_zones = [] <NEW_LINE> for element in findall(element=result, xpath='availabilityZoneInfo/item', namespace=NAMESPACE): <NEW_LINE> <INDENT> name = findtext(element=element, xpath='zoneName', namespace=NAMESPACE) <NEW_LINE> zone_state = findtext(element=element, xpath='zoneState', namespace=NAMESPACE) <NEW_LINE> region_name = findtext(element=element, xpath='regionName', namespace=NAMESPACE) <NEW_LINE> availability_zone = ExEC2AvailabilityZone( name=name, zone_state=zone_state, region_name=region_name ) <NEW_LINE> availability_zones.append(availability_zone) <NEW_LINE> <DEDENT> return availability_zones | Return a list of :class:`ExEC2AvailabilityZone` objects for the
current region.
Note: This is an extension method and is only available for EC2
driver.
:keyword only_available: If true, return only availability zones
with state 'available'
:type only_available: ``str``
:rtype: ``list`` of :class:`ExEC2AvailabilityZone` | 625941ba287bf620b61d3918 |
def EnterGradientColocation(self, op, gradient_uid): <NEW_LINE> <INDENT> if self._outer_context: <NEW_LINE> <INDENT> self._outer_context.EnterGradientColocation(op, gradient_uid) | Start building a gradient colocated with an op. | 625941bade87d2750b85fc39 |
def aliases(self) -> Iterable[str]: <NEW_LINE> <INDENT> return iter(self._aliases.values()) | :return: iterable of aliases | 625941bacb5e8a47e48b7959 |
def test_load_from_json(self, ): <NEW_LINE> <INDENT> model = load_model("hydropower_example.json") <NEW_LINE> r = model.recorders['turbine1_energy'] <NEW_LINE> assert r.water_elevation_parameter == model.parameters['reservoir1_level'] <NEW_LINE> assert r.node == model.nodes['turbine1'] <NEW_LINE> assert_allclose(r.turbine_elevation, 35.0) <NEW_LINE> assert_allclose(r.efficiency, 0.85) <NEW_LINE> assert_allclose(r.flow_unit_conversion, 1e3) <NEW_LINE> model.run() | Test example hydropower model loads and runs. | 625941bbdc8b845886cb53df |
def __len__(self): <NEW_LINE> <INDENT> return len(self.container[0]) | Returns the number of batches in the dataset
| 625941bba934411ee3751545 |
def set_zsort(self, zsort): <NEW_LINE> <INDENT> if zsort is True: <NEW_LINE> <INDENT> zsort = 'average' <NEW_LINE> <DEDENT> if zsort is not False: <NEW_LINE> <INDENT> if zsort in self._zsort_functions: <NEW_LINE> <INDENT> zsortfunc = self._zsort_functions[zsort] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> zsortfunc = None <NEW_LINE> <DEDENT> self._zsort = zsort <NEW_LINE> self._sort_zpos = None <NEW_LINE> self._zsortfunc = zsortfunc | Set z-sorting behaviour:
boolean: if True use default 'average'
string: 'average', 'min' or 'max' | 625941bb046cf37aa974cbf5 |
def get_debug(self): <NEW_LINE> <INDENT> return self.debug | Return the python logging level | 625941bb091ae35668666e0f |
def cycle(iterable): <NEW_LINE> <INDENT> while True: <NEW_LINE> <INDENT> for element in iterable: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> yield element <NEW_LINE> <DEDENT> except StopIteration: <NEW_LINE> <INDENT> cycle(iterable) | cycle([1, 2, 3]) -> 1 2 3 1 2 ... | 625941bb66656f66f7cbc055 |
def make(self, src_templ, evaldict=None, addsource=False, **attrs): <NEW_LINE> <INDENT> src = src_templ % vars(self) <NEW_LINE> evaldict = evaldict or {} <NEW_LINE> mo = DEF.match(src) <NEW_LINE> if mo is None: <NEW_LINE> <INDENT> raise SyntaxError("not a valid function template\n%s" % src) <NEW_LINE> <DEDENT> name = mo.group(1) <NEW_LINE> names = set( [name] + [arg.strip(" *") for arg in self.shortsignature.split(",")] ) <NEW_LINE> for n in names: <NEW_LINE> <INDENT> if n in ("_func_", "_call_"): <NEW_LINE> <INDENT> raise NameError("%s is overridden in\n%s" % (n, src)) <NEW_LINE> <DEDENT> <DEDENT> if not src.endswith("\n"): <NEW_LINE> <INDENT> src += "\n" <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> code = compile(src, "<string>", "single") <NEW_LINE> eval(code, evaldict, evaldict) <NEW_LINE> <DEDENT> except Exception: <NEW_LINE> <INDENT> raise <NEW_LINE> <DEDENT> func = evaldict[name] <NEW_LINE> if addsource: <NEW_LINE> <INDENT> attrs["__source__"] = src <NEW_LINE> <DEDENT> self.update(func, **attrs) <NEW_LINE> return func | Make a new function from a given template and update the signature | 625941bb9c8ee82313fbb620 |
def transfer(ctx, _to='address', _value='uint256', returns=STATUS): <NEW_LINE> <INDENT> log.DEV('In Fungible.transfer') <NEW_LINE> if ctx.accounts[ctx.msg_sender] >= _value: <NEW_LINE> <INDENT> ctx.accounts[ctx.msg_sender] -= _value <NEW_LINE> ctx.accounts[_to] += _value <NEW_LINE> ctx.Transfer(ctx.msg_sender, _to, _value) <NEW_LINE> return OK <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return INSUFFICIENTFUNDS | Standardized Contract API:
function transfer(address _to, uint256 _value) returns (bool _success) | 625941bb91f36d47f21ac39a |
def __init__ (self, raw, logger=None): <NEW_LINE> <INDENT> self.__data = raw <NEW_LINE> self.__process_metadata() <NEW_LINE> self.log = logger if logger is not None else logging.getLogger(__name__) | Constructor.
:param raw: raw data parsed from JSON file
:type raw: dict
:return: None | 625941bb38b623060ff0ac9a |
def accented_syllable_to_numbered(s): <NEW_LINE> <INDENT> if s[0] == '\u00B7': <NEW_LINE> <INDENT> lowercase_syllable, case_memory = _lower_case(s[1:]) <NEW_LINE> lowercase_syllable = '\u00B7' + lowercase_syllable <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> lowercase_syllable, case_memory = _lower_case(s) <NEW_LINE> <DEDENT> numbered_syllable, tone = _parse_accented_syllable(lowercase_syllable) <NEW_LINE> return _restore_case(numbered_syllable, case_memory) + tone | Convert accented Pinyin syllable *s* to a numbered Pinyin syllable. | 625941bbadb09d7d5db6c63d |
def processAgentProtectionRequest(self, data): <NEW_LINE> <INDENT> return self.session.request('corecallback/agentprotectionrequest', 'POST', self.getXML(data, 'coreCallbackRequest')) | The method is called by failover agent in order to
perform remote pairing. This method is for internal usage
only. | 625941bb099cdd3c635f0b07 |
def load_ids_whitelist(filename, filetype, whitelist): <NEW_LINE> <INDENT> for single_name in load_ids(filename, filetype): <NEW_LINE> <INDENT> if single_name in whitelist: <NEW_LINE> <INDENT> yield single_name <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> sys.exit( "Unexpected ID %s in %s file %s" % (single_name, filetype, filename) ) | Check if ids are in whitelist. | 625941bb3c8af77a43ae3648 |
def getLast(self): <NEW_LINE> <INDENT> return self.data[-1] | Returns the last datapoint. | 625941bb63d6d428bbe4439a |
def testGroups(self): <NEW_LINE> <INDENT> self.assertEqual(len(self.h5_file.items()), 1) <NEW_LINE> self.assertTrue('time_series' in self.h5_file) | Check that the number of groups under / is what we expect | 625941bbd58c6744b4257b0b |
def hasCycle(self, head): <NEW_LINE> <INDENT> slow = fast = head <NEW_LINE> while slow: <NEW_LINE> <INDENT> slow = slow.next <NEW_LINE> if not fast or not fast.next: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> fast = fast.next.next <NEW_LINE> if slow == fast: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> <DEDENT> return False | :type head: ListNode
:rtype: bool | 625941bb3eb6a72ae02ec37f |
def main(): <NEW_LINE> <INDENT> creds = Credentials(['apic', 'nosnapshotfiles'], description=("This application replicates the switch " "CLI command 'show interface fex'")) <NEW_LINE> creds.add_argument('-s', '--switch', type=str, default=None, help='Specify a particular switch id, e.g. "101"') <NEW_LINE> creds.add_argument('-i', '--interface', type=str, default=None, help='Specify a particular interface id, e.g. "po101"') <NEW_LINE> args = creds.get() <NEW_LINE> interface_collector = InterfaceCollector(args.url, args.login, args.password) <NEW_LINE> interface_collector.show_summary(node=args.switch, intf_id=args.interface) | Main common routine for show interface description
:return: None | 625941bbe8904600ed9f1dd4 |
def run_git(args, git_path=_DEFAULT_GIT, input=None, capture_stdout=False, **popen_kwargs): <NEW_LINE> <INDENT> args = [git_path] + args <NEW_LINE> popen_kwargs['stdin'] = subprocess.PIPE <NEW_LINE> if capture_stdout: <NEW_LINE> <INDENT> popen_kwargs['stdout'] = subprocess.PIPE <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> popen_kwargs.pop('stdout', None) <NEW_LINE> <DEDENT> p = subprocess.Popen(args, **popen_kwargs) <NEW_LINE> stdout, stderr = p.communicate(input=input) <NEW_LINE> return (p.returncode, stdout) | Run a git command.
Input is piped from the input parameter and output is sent to the standard
streams, unless capture_stdout is set.
:param args: A list of args to the git command.
:param git_path: Path to the git executable.
:param input: Input data to be sent to stdin.
:param capture_stdout: Whether to capture and return stdout.
:param popen_kwargs: Additional kwargs for subprocess.Popen;
stdin/stdout args are ignored.
:return: A tuple of (returncode, stdout contents). If capture_stdout is
False, None will be returned as stdout contents.
:raise OSError: if the git executable was not found. | 625941bb26238365f5f0ed15 |
def getErrors(self): <NEW_LINE> <INDENT> return (self.destX - self.brain.my.x, self.destY - self.brain.my.Y, self.destH - self.brain.my.h) | Returns X,Y,H errors for how close we are to our destination positioning | 625941bb6fb2d068a760ef45 |
def get_best_move(board, scores): <NEW_LINE> <INDENT> possible_moves = board.get_empty_squares() <NEW_LINE> highest_score = -10000 <NEW_LINE> highest_scores = [] <NEW_LINE> for row, col in possible_moves: <NEW_LINE> <INDENT> score = scores[row][col] <NEW_LINE> if score > highest_score: <NEW_LINE> <INDENT> highest_score = score <NEW_LINE> highest_scores = [] <NEW_LINE> highest_scores.append((row, col)) <NEW_LINE> <DEDENT> elif score == highest_score: <NEW_LINE> <INDENT> highest_scores.append((row, col)) <NEW_LINE> <DEDENT> <DEDENT> random_pick = random.choice(highest_scores) <NEW_LINE> row = random_pick[0] <NEW_LINE> col = random_pick[1] <NEW_LINE> return (row, col) | This function takes a current board and a grid of scores. The function should find all of the empty squares with the maximum score and randomly return one of them as a (row, column) tuple.
It is an error to call this function with a board that has no empty squares (no possible next move), so your function may do whatever it wants in that case. The case where the board is full will not be tested. | 625941bb851cf427c661a3bd |
def __init__(self, ZenPack, componentA, componentB, type_=None, contained=True, *args, **kwargs ): <NEW_LINE> <INDENT> self.ZenPack = ZenPack <NEW_LINE> from .Component import Component <NEW_LINE> self.logger = logger = logging.getLogger('ZenPack Generator') <NEW_LINE> name = "-".join([componentA, componentB]) <NEW_LINE> layer = "%s:%s" % (self.__class__.__name__, name) <NEW_LINE> if type_ not in self.valid_relationship_types: <NEW_LINE> <INDENT> msg = "WARNING: [%s] unknown type: '%s'. Defaulted to '%s'. " <NEW_LINE> layer = "%s:%s" % (self.__class__.__name__, name) <NEW_LINE> margs = (layer, type, '1-M') <NEW_LINE> if type_ == 'M-1': <NEW_LINE> <INDENT> a_b = (componentA, componentB) <NEW_LINE> msg += "Reversed '%s' and '%s'." % a_b <NEW_LINE> swap = componentB <NEW_LINE> componentB = componentA <NEW_LINE> componentA = swap <NEW_LINE> <DEDENT> if type_ is not None: <NEW_LINE> <INDENT> warn(self.logger, yellow(msg) % margs) <NEW_LINE> <DEDENT> type_ = '1-M' <NEW_LINE> <DEDENT> for key in kwargs: <NEW_LINE> <INDENT> do_not_warn = False <NEW_LINE> msg = "WARNING: [%s] unknown keyword ignored in file: '%s'" <NEW_LINE> margs = (layer, key) <NEW_LINE> if key == "Type": <NEW_LINE> <INDENT> msg = "WARNING: [%s] keyword deprecated: " "'%s' is now '%s'." <NEW_LINE> margs = (layer, key, key.lower()) <NEW_LINE> self.type_ = kwargs[key] <NEW_LINE> <DEDENT> elif key == "type": <NEW_LINE> <INDENT> self.type_ = type_ = kwargs[key] <NEW_LINE> do_not_warn = True <NEW_LINE> <DEDENT> elif key == "Contained": <NEW_LINE> <INDENT> msg = "WARNING: [%s] keyword deprecated: " "'%s' is now '%s'." <NEW_LINE> margs = (layer, key, key.lower()) <NEW_LINE> self.contained = kwargs[key] <NEW_LINE> <DEDENT> if not do_not_warn: <NEW_LINE> <INDENT> warn(self.logger, yellow(msg) % margs) <NEW_LINE> <DEDENT> <DEDENT> lookup = Component.lookup <NEW_LINE> self.components = lookup( ZenPack, componentA), lookup(ZenPack, componentB) <NEW_LINE> self.id = '%s %s' % (self.components[0].id, self.components[1].id) <NEW_LINE> self.type_ = type_ <NEW_LINE> self.contained = contained <NEW_LINE> self.ZenPack.registerRelationship(self) <NEW_LINE> Relationship.relationships[self.id] = self | Args:
ZenPack: A ZenPack Class instance
componentA: Parent component string id
componentB: Child component string id
type_: Relationship type_. Valid inputs [1-1, 1-M, M-M]
contained: ComponentA contains ComponentB True or False | 625941bb7b180e01f3dc46af |
def find_functions(ast_body, prefix=None): <NEW_LINE> <INDENT> functions = [] <NEW_LINE> for node in ast_body: <NEW_LINE> <INDENT> if isinstance(node, ast.FunctionDef): <NEW_LINE> <INDENT> name = node.name if prefix is None else prefix + '_' + node.name <NEW_LINE> arguments = [] <NEW_LINE> for i, arg in enumerate(node.args.args): <NEW_LINE> <INDENT> if arg.id != 'self': <NEW_LINE> <INDENT> arguments.append(arg.id) <NEW_LINE> <DEDENT> <DEDENT> functions.append({'name': name, 'arguments': arguments}) <NEW_LINE> <DEDENT> elif isinstance(node, ast.ClassDef): <NEW_LINE> <INDENT> name = node.name if prefix is None else prefix + '_' + node.name <NEW_LINE> functions.extend(find_functions(node.body, name)) <NEW_LINE> <DEDENT> <DEDENT> return functions | Loop through the ast nodes looking for functions and classes.
Add functions to the returned list.
Call this find_functions again if we find a class. | 625941bb56ac1b37e6264081 |
def toVariantContexts(self): <NEW_LINE> <INDENT> vcs = self._jvmRdd.toVariantContexts() <NEW_LINE> return VariantContextDataset(vcs, self.sc) | :return: These variants, converted to variant contexts. | 625941bb5fcc89381b1e156f |
def __getitem__(self, item): <NEW_LINE> <INDENT> return self.__dict__[item] | Gets the event hook with the specified name. | 625941bb090684286d50eb8c |
def db_hmfnf(): <NEW_LINE> <INDENT> global hmf_nfdb <NEW_LINE> if hmf_nfdb is None: <NEW_LINE> <INDENT> hmf_nfdb = getDBConnection().hmfs.fields <NEW_LINE> <DEDENT> return hmf_nfdb | Return the fields collection from the HMF database | 625941bb091ae35668666e10 |
def _log_no_entries(self): <NEW_LINE> <INDENT> _LOGGER.debug('No new entries in feed "%s"', self._url) | Send no entries log at debug level. | 625941bbbe8e80087fb20af3 |
def test_unlogged_table(self): <NEW_LINE> <INDENT> inmap = self.std_map() <NEW_LINE> inmap['schema public'].update({'table t1': { 'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}}], 'unlogged': True}}) <NEW_LINE> sql = self.to_sql(inmap) <NEW_LINE> assert fix_indent(sql[0]) == "CREATE UNLOGGED TABLE t1 (c1 integer, c2 text)" | Create an unlogged table | 625941bbbe383301e01b5337 |
def put(self, path, params=None): <NEW_LINE> <INDENT> if params is None or params == '': <NEW_LINE> <INDENT> params = None <NEW_LINE> <DEDENT> if path is None or path == '': <NEW_LINE> <INDENT> logger.error("接口名字错误") <NEW_LINE> return False <NEW_LINE> <DEDENT> params = self.__get__relations(params) <NEW_LINE> params = self.__get_data(params) <NEW_LINE> if not path.startswith("http"): <NEW_LINE> <INDENT> path = self.url + "/" + path <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> self.result = self.session.put(path, data=params) <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> self.result = None <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> resulttext = self.result.text <NEW_LINE> resulttext = resulttext[resulttext.find('{'):resulttext.rfind('}') + 1] <NEW_LINE> self.jsonres = json.loads(resulttext) <NEW_LINE> logging = logger.info(self.jsonres) <NEW_LINE> print(str(self.jsonres)) <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> logger.exception(e) <NEW_LINE> self.jsonres = None <NEW_LINE> <DEDENT> return True | Pass key-value parameters as a data dict
:param path: API endpoint path
:param params: parameter dict
:return: success or failure | 625941ba21bff66bcd684800 |
def _tr(self): <NEW_LINE> <INDENT> if not self.signalHash.has_key('tr'): <NEW_LINE> <INDENT> val = self.midpt[0] <NEW_LINE> vals = [val] <NEW_LINE> for i in range(1, len(self.open)): <NEW_LINE> <INDENT> val = max(abs(self.high[i] - self.low[i]), abs(self.high[i] - self.close[i - 1]), abs(self.close[i - 1] - self.low[i])) <NEW_LINE> vals.append(val) <NEW_LINE> <DEDENT> self.signalHash['tr'] = np.array(vals) <NEW_LINE> <DEDENT> return self.signalHash['tr'] | Smooth | 625941bb0383005118ecf490 |
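The code column above is stored flattened: `<NEW_LINE>` separates logical source lines, while `<INDENT>` and `<DEDENT>` mark indentation changes, a common flattening scheme in code–docstring corpora. Below is a minimal sketch of a detokenizer that restores runnable Python from a row's code field; the token semantics are inferred from the rows above rather than from a published spec, so treat them as an assumption.

```python
def detokenize(flat_code: str, indent: str = "    ") -> str:
    """Rebuild Python source from a flattened ``code`` field.

    Assumption: ``<NEW_LINE>`` starts a new logical line, and leading
    ``<INDENT>``/``<DEDENT>`` markers raise or lower the indentation
    level by one step, matching the scheme visible in the rows above.
    """
    level = 0
    lines = []
    for chunk in flat_code.split("<NEW_LINE>"):
        chunk = chunk.strip()
        # Consume indentation markers at the start of the chunk.
        while chunk.startswith("<INDENT>"):
            level += 1
            chunk = chunk[len("<INDENT>"):].lstrip()
        while chunk.startswith("<DEDENT>"):
            level = max(0, level - 1)
            chunk = chunk[len("<DEDENT>"):].lstrip()
        if chunk:
            lines.append(indent * level + chunk)
    return "\n".join(lines)
```

For example, `print(detokenize(row["code"]))` on the first row prints a properly indented `get_service` function (`row` here is a hypothetical parsed record, not part of this dump).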