code (stringlengths 4–4.48k) | docstring (stringlengths 1–6.45k) | _id (stringlengths 24–24) |
---|---|---|
def __get_next_target(self): <NEW_LINE> <INDENT> target_i = self.targets[self.clicks % self.params.get_number_of_targets()] <NEW_LINE> return self.circle.get_position_at(target_i) | Finds the position of the next target and returns it. | 625941b663f4b57ef0000f40 |
def test_translated_exons(self): <NEW_LINE> <INDENT> gene = self.mouse.get_gene_by_stableid(stableid="ENSMUSG00000036136") <NEW_LINE> transcript = gene.get_member("ENSMUST00000041133") <NEW_LINE> self.assertTrue(len(transcript.protein_seq) > 0) <NEW_LINE> gene = self.mouse.get_gene_by_stableid(stableid="ENSMUSG00000045912") <NEW_LINE> transcript = gene.transcripts[0] <NEW_LINE> self.assertTrue(len(transcript.protein_seq) > 0) | should correctly translate a gene with 2 exons but 1st exon
transcribed | 625941b6cdde0d52a9e52e4b |
def replenish_shelves(self, batch: BatchNew): <NEW_LINE> <INDENT> for item_id, item in batch.items.items(): <NEW_LINE> <INDENT> if item.shelf: <NEW_LINE> <INDENT> self.item_id_pod_id_dict[item.orig_ID][item.shelf] += 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> pass | if a batch is destroyed, the items that were taken from the shelves during the tour need to be added to the
shelves again.
:param batch:
:return: | 625941b67047854f462a122a |
def get_policy_profiles(self, context, filters=None, fields=None): <NEW_LINE> <INDENT> if context.is_admin or not c_conf.CISCO_N1K.restrict_policy_profiles: <NEW_LINE> <INDENT> return self._get_collection(context, n1kv_models_v2.PolicyProfile, self._make_policy_profile_dict, filters=filters, fields=fields) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return self._get_policy_collection_for_tenant(context.session, n1kv_models_v2. PolicyProfile, context.tenant_id) | Retrieve a list of policy profiles.
Retrieve all policy profiles if tenant is admin. For a non-admin
tenant, retrieve all policy profiles belonging to this tenant only.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for a
policy profile object. Values in this dictionary are
an iterable containing values that will be used for an
exact match comparison for that value. Each result
returned by this function will have matched one of the
values for each key in filters
:param fields: a list of strings that are valid keys in a policy
profile dictionary. Only these fields will be returned
:returns: list of all policy profiles | 625941b6009cb60464c631da |
def end_time(self): <NEW_LINE> <INDENT> return self.time_range()[1] | Returns the global end time in seconds. | 625941b6de87d2750b85fbab |
def test_build_ex_12(self): <NEW_LINE> <INDENT> sd = SourceDescription() <NEW_LINE> sd.describedby = 'http://example.com/info_about_source.xml' <NEW_LINE> cl1 = CapabilityList(uri='http://example.com/capabilitylist1.xml') <NEW_LINE> cl1.describedby = 'http://example.com/info_about_set1_of_resources.xml' <NEW_LINE> sd.add_capability_list(cl1) <NEW_LINE> cl2 = CapabilityList(uri='http://example.com/capabilitylist2.xml') <NEW_LINE> cl2.describedby = 'http://example.com/info_about_set2_of_resources.xml' <NEW_LINE> sd.add_capability_list(cl2) <NEW_LINE> cl3 = CapabilityList(uri='http://example.com/capabilitylist3.xml') <NEW_LINE> cl3.describedby = 'http://example.com/info_about_set3_of_resources.xml' <NEW_LINE> sd.add_capability_list(cl3) <NEW_LINE> ex_xml = self._read_ex('resourcesync_ex_12') <NEW_LINE> self._assert_xml_equal(sd.as_xml(), ex_xml) | Source Description document with describedby links | 625941b61d351010ab85593b |
@util.cache_results <NEW_LINE> def get_weeks_in_year(year): <NEW_LINE> <INDENT> cal_year, cal_ord_days = get_ordinal_date_week_date_start(year) <NEW_LINE> cal_year_next, cal_ord_days_next = get_ordinal_date_week_date_start( year + 1) <NEW_LINE> diff_days = cal_ord_days_next - cal_ord_days <NEW_LINE> for intervening_year in range(cal_year, cal_year_next): <NEW_LINE> <INDENT> diff_days += get_days_in_year(intervening_year) <NEW_LINE> <DEDENT> return diff_days / DAYS_IN_WEEK | Return the number of calendar weeks in this week date year. | 625941b6a934411ee37514b8 |
def get_video_info_for_course_and_profiles(course_id, profiles): <NEW_LINE> <INDENT> course_id = unicode(course_id) <NEW_LINE> try: <NEW_LINE> <INDENT> encoded_videos = EncodedVideo.objects.filter( profile__profile_name__in=profiles, video__courses__course_id=course_id ).select_related() <NEW_LINE> <DEDENT> except Exception: <NEW_LINE> <INDENT> error_message = u"Could not get encoded videos for course: {0}".format(course_id) <NEW_LINE> logger.exception(error_message) <NEW_LINE> raise ValInternalError(error_message) <NEW_LINE> <DEDENT> return_dict = {} <NEW_LINE> for enc_vid in encoded_videos: <NEW_LINE> <INDENT> return_dict.setdefault(enc_vid.video.edx_video_id, {}).update( { "duration": enc_vid.video.duration, } ) <NEW_LINE> return_dict[enc_vid.video.edx_video_id].setdefault("profiles", {}).update( {enc_vid.profile.profile_name: { "url": enc_vid.url, "file_size": enc_vid.file_size, }} ) <NEW_LINE> <DEDENT> return return_dict | Returns a dict of edx_video_ids with a dict of requested profiles.
Args:
course_id (str): id of the course
profiles (list): list of profile_names
Returns:
(dict): Returns all the profiles attached to a specific
edx_video_id
{
edx_video_id: {
'duration': length of the video in seconds,
'profiles': {
profile_name: {
'url': url of the encoding
'file_size': size of the file in bytes
},
}
},
}
Example:
Given two videos with two profiles each in course_id 'test_course':
{
u'edx_video_id_1': {
u'duration': 1111,
u'profiles': {
u'mobile': {
'url': u'http://www.example.com/meow',
'file_size': 2222
},
u'desktop': {
'url': u'http://www.example.com/woof',
'file_size': 4444
}
}
},
u'edx_video_id_2': {
u'duration': 2222,
u'profiles': {
u'mobile': {
'url': u'http://www.example.com/roar',
'file_size': 6666
},
u'desktop': {
'url': u'http://www.example.com/bzzz',
'file_size': 8888
}
}
}
} | 625941b68e05c05ec3eea18e |
def get_posts(post_dir): <NEW_LINE> <INDENT> from yak import Post <NEW_LINE> posts = [] <NEW_LINE> for root, _, files in os.walk(post_dir): <NEW_LINE> <INDENT> for filename in files: <NEW_LINE> <INDENT> valid_filename = is_valid_filename(filename) <NEW_LINE> if valid_filename: <NEW_LINE> <INDENT> with open(os.path.join(root, filename), 'r', 'utf-8') as f: <NEW_LINE> <INDENT> markdown = f.read() <NEW_LINE> <DEDENT> post = is_valid_post(markdown, valid_filename['published']) <NEW_LINE> if post: <NEW_LINE> <INDENT> posts.append(Post(root, filename, post)) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> return posts | Returns a list of Post objects for a given directory. | 625941b6ad47b63b2c509da7 |
def _prepare_image(self, image): <NEW_LINE> <INDENT> img = tf.image.decode_png(image, channels=self.channels) <NEW_LINE> resized = tf.image.resize_images(img, [32, 100], method=tf.image.ResizeMethod.BICUBIC) <NEW_LINE> return resized | Resize the image to a maximum height of `self.height` and maximum
width of `self.width` while maintaining the aspect ratio. Pad the
resized image to a fixed size of ``[self.height, self.width]``. | 625941b63317a56b86939a87 |
def close(self, position: Position, level: float) -> None: <NEW_LINE> <INDENT> self._api_close_position(position, level) | Close a currently open position. | 625941b63d592f4c4ed1ce9e |
def filterFiles(self, directoryPath, extension): <NEW_LINE> <INDENT> relevant_path = directoryPath <NEW_LINE> included_extensions = [extension] <NEW_LINE> file_names = [file1 for file1 in os.listdir(relevant_path) if any(file1.endswith(ext) for ext in included_extensions)] <NEW_LINE> numberOfFiles = len(file_names) <NEW_LINE> listParams = [file_names, numberOfFiles] <NEW_LINE> return listParams | This method filters the files in the directory by the selected extension
Args:
directoryPath (str): relative path of the directory that contains text files
extension (str): extension file
Returns:
The list of filtered files with the selected extension | 625941b696565a6dacc8f4f2 |
def setJobAttributes(self, jobID, attrNames, attrValues, update=False, myDate=None): <NEW_LINE> <INDENT> jobIDList = jobID <NEW_LINE> if not isinstance(jobID, (list, tuple)): <NEW_LINE> <INDENT> jobIDList = [jobID] <NEW_LINE> <DEDENT> jIDList = [] <NEW_LINE> for jID in jobIDList: <NEW_LINE> <INDENT> ret = self._escapeString(jID) <NEW_LINE> if not ret['OK']: <NEW_LINE> <INDENT> return ret <NEW_LINE> <DEDENT> jIDList.append(ret['Value']) <NEW_LINE> <DEDENT> if len(attrNames) != len(attrValues): <NEW_LINE> <INDENT> return S_ERROR('JobDB.setAttributes: incompatible Argument length') <NEW_LINE> <DEDENT> for attrName in attrNames: <NEW_LINE> <INDENT> if attrName not in self.jobAttributeNames: <NEW_LINE> <INDENT> return S_ERROR(EWMSSUBM, 'Request to set non-existing job attribute') <NEW_LINE> <DEDENT> <DEDENT> attr = [] <NEW_LINE> for name, value in zip(attrNames, attrValues): <NEW_LINE> <INDENT> ret = self._escapeString(value) <NEW_LINE> if not ret['OK']: <NEW_LINE> <INDENT> return ret <NEW_LINE> <DEDENT> attr.append("%s=%s" % (name, ret['Value'])) <NEW_LINE> <DEDENT> if update: <NEW_LINE> <INDENT> attr.append("LastUpdateTime=UTC_TIMESTAMP()") <NEW_LINE> <DEDENT> if not attr: <NEW_LINE> <INDENT> return S_ERROR('JobDB.setAttributes: Nothing to do') <NEW_LINE> <DEDENT> cmd = 'UPDATE Jobs SET %s WHERE JobID in ( %s )' % (', '.join(attr), ', '.join(jIDList)) <NEW_LINE> if myDate: <NEW_LINE> <INDENT> cmd += ' AND LastUpdateTime < %s' % myDate <NEW_LINE> <DEDENT> return self._transaction([cmd]) | Set one or more attribute values for one or more jobs specified by jobID.
The LastUpdate time stamp is refreshed if explicitly requested with the update flag
:param jobID: one or more job IDs
:type jobID: int or str or python:list
:param list attrNames: names of attributes to update
:param list attrValues: corresponding values of attributes to update
:param bool update: optional flag to update the job LastUpdateTime stamp
:param str myDate: optional time stamp for the LastUpdateTime attribute
:return: S_OK/S_ERROR | 625941b6377c676e91271fc8 |
def cross(self, dirs): <NEW_LINE> <INDENT> patterns = [ { 'empty': [Direction.northwest, Direction.southeast], 'filled': [ Direction.north, Direction.south, Direction.east, Direction.west ], 'transformations': [Direction.l_rotate] } ] <NEW_LINE> for pattern in patterns: <NEW_LINE> <INDENT> if self.pattern_match(dirs, pattern): <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> <DEDENT> return False | returns true if self should be a cross according to dirs
Valid cases:
.#d
#c#
d#. | 625941b621a7993f00bc7b06 |
def parse_vote(self, number, question, withdrawals=[]): <NEW_LINE> <INDENT> pass | parse vote | 625941b6d53ae8145f87a094 |
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.nand, DiskType.optane])) <NEW_LINE> @pytest.mark.require_disk("core", DiskTypeLowerThan("cache")) <NEW_LINE> @pytest.mark.parametrize("shortcut", [True, False]) <NEW_LINE> def test_cli_add_remove_custom_id(shortcut): <NEW_LINE> <INDENT> with TestRun.step("Prepare the devices."): <NEW_LINE> <INDENT> cache_disk = TestRun.disks['cache'] <NEW_LINE> cache_disk.create_partitions([Size(50, Unit.MebiByte)]) <NEW_LINE> cache_device = cache_disk.partitions[0] <NEW_LINE> core_device = TestRun.disks['core'] <NEW_LINE> <DEDENT> with TestRun.step("Start the cache and add the core with a random ID."): <NEW_LINE> <INDENT> core_id = randint(*CORE_ID_RANGE) <NEW_LINE> cache = casadm.start_cache(cache_device, shortcut=shortcut, force=True) <NEW_LINE> core = casadm.add_core(cache, core_device, core_id=core_id, shortcut=shortcut) <NEW_LINE> TestRun.LOGGER.info(f"Core ID: {core_id}") <NEW_LINE> <DEDENT> with TestRun.step("Check if the core is added to the cache."): <NEW_LINE> <INDENT> caches = casadm_parser.get_caches() <NEW_LINE> if len(caches[0].get_core_devices()) != 1: <NEW_LINE> <INDENT> TestRun.fail("One core should be present in the cache.") <NEW_LINE> <DEDENT> if caches[0].get_core_devices()[0].path != core.path: <NEW_LINE> <INDENT> TestRun.fail("The core path should be equal to the path of the core added.") <NEW_LINE> <DEDENT> <DEDENT> with TestRun.step("Remove the core from the cache."): <NEW_LINE> <INDENT> casadm.remove_core(cache.cache_id, core.core_id, shortcut=shortcut) <NEW_LINE> <DEDENT> with TestRun.step("Check if the core is successfully removed from still running cache."): <NEW_LINE> <INDENT> caches = casadm_parser.get_caches() <NEW_LINE> if len(caches) != 1: <NEW_LINE> <INDENT> TestRun.fail("One cache should be still present after removing the core.") <NEW_LINE> <DEDENT> if len(caches[0].get_core_devices()) != 0: <NEW_LINE> <INDENT> TestRun.fail("No core device should be present after removing the core.") <NEW_LINE> <DEDENT> <DEDENT> with TestRun.step("Stop the cache."): <NEW_LINE> <INDENT> casadm.stop_cache(cache_id=cache.cache_id, shortcut=shortcut) <NEW_LINE> <DEDENT> with TestRun.step("Check if the cache has successfully stopped."): <NEW_LINE> <INDENT> caches = casadm_parser.get_caches() <NEW_LINE> if len(caches) != 0: <NEW_LINE> <INDENT> TestRun.fail("No cache should be present after stopping the cache.") <NEW_LINE> <DEDENT> output = casadm.list_caches(shortcut=shortcut) <NEW_LINE> cli_messages.check_stdout_msg(output, cli_messages.no_caches_running) | title: Test for adding and removing a core with a custom ID - short and long command
description: |
Start a new cache and add a core to it with passing a random core ID
(from allowed pool) as an argument and then remove this core from the cache.
pass_criteria:
- The core is added to the cache with a default ID
- The core is successfully removed from the cache | 625941b60c0af96317bb8007 |
def buildGenotypeToAnnotation(self): <NEW_LINE> <INDENT> cols = self.results[-1][0] <NEW_LINE> evidenceKeyCol = Gatherer.columnNumber (cols, '_annotevidence_key') <NEW_LINE> annotTypeCol = Gatherer.columnNumber (cols, 'annottype') <NEW_LINE> objectKeyCol = Gatherer.columnNumber (cols, '_object_key') <NEW_LINE> refsKeyCol = Gatherer.columnNumber (cols, '_refs_key') <NEW_LINE> mgitypeKeyCol = Gatherer.columnNumber (cols, '_mgitype_key') <NEW_LINE> for rows in self.annotGroupsRows: <NEW_LINE> <INDENT> repRow = rows[0] <NEW_LINE> newAnnotationKey = self.evidenceKeyToNew[repRow[evidenceKeyCol]] <NEW_LINE> annotType = repRow[annotTypeCol] <NEW_LINE> objectKey = repRow[objectKeyCol] <NEW_LINE> mgitype = MGITYPE_LOOKUP.get(repRow[mgitypeKeyCol]) <NEW_LINE> if mgitype == 'Genotype': <NEW_LINE> <INDENT> self.addRow('genotype_to_annotation', ( objectKey, newAnnotationKey, annotType )) | Build the genotype_to_annotation table | 625941b64428ac0f6e5ba60f |
@contextmanager <NEW_LINE> def temp_path(): <NEW_LINE> <INDENT> path = sys.path[:] <NEW_LINE> try: <NEW_LINE> <INDENT> yield <NEW_LINE> <DEDENT> finally: <NEW_LINE> <INDENT> sys.path = path[:] | A context manager which allows sys.path to be set temporarily | 625941b645492302aab5e0dd |
def streamLogs(self): <NEW_LINE> <INDENT> return self.__get_stream('/stream/logs') | Return systemd-journald logs in chunked packages. | 625941b6e8904600ed9f1d46 |
def RandomPointOnSphere(): <NEW_LINE> <INDENT> u = rand.random()*pow(-1., rand.randint(0,1)) <NEW_LINE> theta = rand.random()*2*math.pi <NEW_LINE> x = math.sqrt(1-(u*u))*math.cos(theta) <NEW_LINE> y = math.sqrt(1 - (u*u))*math.sin(theta) <NEW_LINE> z = u <NEW_LINE> return np.array((x,y,z)) | Computes a random point on a sphere
Returns - a point on a unit sphere [x,y,z] at the origin | 625941b6097d151d1a222c7a |
def __init__(self, config): <NEW_LINE> <INDENT> self.db = web.database(dbn=config['dbn'], host=config['host'], user = config['user'], pw = config['password'], db = config['database']) | Creates the connection to the database | 625941b62c8b7c6e89b355e1 |
def test_signup(self): <NEW_LINE> <INDENT> response = self.sign_up_user(self.user) <NEW_LINE> self.assertEqual(response.status_code, 201) <NEW_LINE> data = json.loads(str(response.data.decode())) <NEW_LINE> self.assertEqual(data['username'], self.user.username) | Tests whether a new user can sign up | 625941b6fff4ab517eb2f257 |
def log_update(sender, instance, **kwargs): <NEW_LINE> <INDENT> if instance.pk is not None: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> old = sender.objects.get(pk=instance.pk) <NEW_LINE> <DEDENT> except sender.DoesNotExist: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> new = instance <NEW_LINE> changes = model_instance_diff(old, new) <NEW_LINE> if changes: <NEW_LINE> <INDENT> log_entry = LogEntry.objects.log_create( instance, action=LogEntry.Action.UPDATE, changes=json.dumps(changes), ) | Signal receiver that creates a log entry when a model instance is changed and saved to the database.
Direct use is discouraged, connect your model through :py:func:`auditlog.registry.register` instead. | 625941b6adb09d7d5db6c5b1 |
@public.add <NEW_LINE> def hours(): <NEW_LINE> <INDENT> return minutes() % 60 | afk time in hours | 625941b67c178a314d6ef276 |
def prefetch(tensor_dict, capacity): <NEW_LINE> <INDENT> names = list(tensor_dict.keys()) <NEW_LINE> dtypes = [t.dtype for t in tensor_dict.values()] <NEW_LINE> shapes = [t.get_shape() for t in tensor_dict.values()] <NEW_LINE> prefetch_queue = tf.PaddingFIFOQueue(capacity, dtypes=dtypes, shapes=shapes, names=names, name='prefetch_queue') <NEW_LINE> enqueue_op = prefetch_queue.enqueue(tensor_dict) <NEW_LINE> tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner(prefetch_queue, [enqueue_op])) <NEW_LINE> tf.summary.scalar('queue/%s/fraction_of_%d_full' % (prefetch_queue.name, capacity), tf.to_float(prefetch_queue.size()) * (1. / capacity)) <NEW_LINE> return prefetch_queue | creates a prefetch queue for tensors.
Create a FIFO queue to asynchronously enqueue tensor_dicts and returns a dequeue op that evaluates to
a tensor_dict. This function is useful in prefetching preprocessed tensors so that the data is readily available for
consumers
Example input pipeline when you don't need batching:
----------------------------------------------------
key, string_tensor = slim.parallel_reader.parallel_read(...)
tensor_dict = decoder.decode(string_tensor)
tensor_dict = preprocessor.preprocess(tensor_dict, ...)
prefetch_queue = prefetcher.prefetch(tensor_dict, capacity=20)
tensor_dict = prefetch_queue.dequeue()
outputs = Model(tensor_dict)
----------------------------------------------------
For input pipelines with batching, refer to core/batcher.py
Args:
tensor_dict: a dictionary of tensors to prefetch.
capacity: the size of the prefetch queue.
Returns:
a FIFO prefetcher queue | 625941b676d4e153a657e94e |
def write_position(fp, position, value, fmt='I'): <NEW_LINE> <INDENT> current_position = fp.tell() <NEW_LINE> fp.seek(position) <NEW_LINE> written = write_bytes(fp, struct.pack(str('>' + fmt), value)) <NEW_LINE> fp.seek(current_position) <NEW_LINE> return written | Writes a value to the specified position.
:param fp: file-like object
:param position: position of the value marker
:param value: value to write
:param fmt: format of the value
:return: written byte size | 625941b663d6d428bbe4430d |
def open(self): <NEW_LINE> <INDENT> pass | Begin the report. | 625941b60a50d4780f666cad |
def add_import(self, node: ast.Import) -> None: <NEW_LINE> <INDENT> for name in node.names: <NEW_LINE> <INDENT> self._add(name, node) | Add an 'import X, Y' statement | 625941b697e22403b379cdb6 |
def test_delete_messages_likes(self): <NEW_LINE> <INDENT> u = User( email="[email protected]", username="testuser", password="HASHED_PASSWORD" ) <NEW_LINE> db.session.add(u) <NEW_LINE> db.session.commit() <NEW_LINE> u2 = User( email="[email protected]", username="testuser2", password="HASHED_PASSWORD" ) <NEW_LINE> db.session.add(u2) <NEW_LINE> db.session.commit() <NEW_LINE> follow = Follows( user_being_followed_id=u.id, user_following_id=u2.id ) <NEW_LINE> db.session.add(follow) <NEW_LINE> db.session.commit() <NEW_LINE> m = Message( text="warblewarble", user_id=u.id ) <NEW_LINE> db.session.add(m) <NEW_LINE> db.session.commit() <NEW_LINE> l = Likes(user_that_liked=u2.id, message_liked=m.id) <NEW_LINE> db.session.add(l) <NEW_LINE> db.session.commit() <NEW_LINE> self.assertEqual(len(u2.likes), 1) <NEW_LINE> db.session.delete(m) <NEW_LINE> db.session.commit() <NEW_LINE> self.assertEqual(len(u2.likes), 0) <NEW_LINE> self.assertEqual(len(u.messages), 0) | test if message got deleted by the user and it deletes from likes table | 625941b621bff66bcd684773 |
def process_training_set(self, json_data, diff_to_target=False): <NEW_LINE> <INDENT> ready_training_set = [] <NEW_LINE> classes = [] <NEW_LINE> cpa_index = self.params.index('cpa_actual') <NEW_LINE> if 'cpa_target' in self.params: <NEW_LINE> <INDENT> line_length = len(self.params) - 1 <NEW_LINE> target_cpa_check = True <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> line_length = len(self.params) <NEW_LINE> target_cpa_check = False <NEW_LINE> <DEDENT> for case in json_data: <NEW_LINE> <INDENT> set = [] <NEW_LINE> question_classes = [] <NEW_LINE> for example in json_data[case]: <NEW_LINE> <INDENT> new_line = [0] * line_length <NEW_LINE> for par, value in example.iteritems(): <NEW_LINE> <INDENT> if par == 'Score': <NEW_LINE> <INDENT> question_classes += [value] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if par in self.params: <NEW_LINE> <INDENT> par_index = self.params.index(par) <NEW_LINE> if target_cpa_check: <NEW_LINE> <INDENT> if par == 'cpa_target': <NEW_LINE> <INDENT> if value is None: <NEW_LINE> <INDENT> line_item_tcpa = value <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> line_item_tcpa = float(value) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> new_line[par_index] = float(value) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> new_line[par_index] = float(value) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> if target_cpa_check: <NEW_LINE> <INDENT> if line_item_tcpa is None: <NEW_LINE> <INDENT> adjusted_cpa_to_target_value = -np.inf <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> adjusted_cpa_to_target_value = new_line[cpa_index] - line_item_tcpa <NEW_LINE> <DEDENT> new_line[cpa_index] = adjusted_cpa_to_target_value <NEW_LINE> <DEDENT> set.append(new_line) <NEW_LINE> <DEDENT> ready_training_set.append(set) <NEW_LINE> classes.append(question_classes) <NEW_LINE> <DEDENT> return ready_training_set, classes | :param json_data: Json data with a set of cases with multiple line items and their priority
:param diff_to_target:
:return: Processed data and an array of their classes | 625941b64c3428357757c149 |
def _validate_foo(self, argument, field, value): <NEW_LINE> <INDENT> pass | {'type': 'zap'} | 625941b6091ae35668666d84 |
def signal(signal=None): <NEW_LINE> <INDENT> ret = _get_return_dict() <NEW_LINE> valid_signals = 'start stop restart' <NEW_LINE> if not valid_signals.count(signal): <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> cmd = "{0} {1}".format(__opts__['solr.init_script'], signal) <NEW_LINE> out = __salt__['cmd.run'](cmd) | Signals Apache Solr to start, stop, or restart. Obviously this is only
going to work if the minion resides on the solr host. Additionally
Solr doesn't ship with an init script so one must be created.
Param: str signal (None): The command to pass to the apache solr init
valid values are 'start', 'stop', and 'restart'
CLI Example:
salt '*' solr.signal restart | 625941b61b99ca400220a8ce |
def __init__( self, *, public_keys: Optional[List["SshPublicKey"]] = None, **kwargs ): <NEW_LINE> <INDENT> super(SshConfiguration, self).__init__(**kwargs) <NEW_LINE> self.public_keys = public_keys | :keyword public_keys: The list of SSH public keys used to authenticate with linux based VMs.
:paramtype public_keys: list[~azure.mgmt.compute.v2019_03_01.models.SshPublicKey] | 625941b671ff763f4b5494ac |
def wrap(obj): <NEW_LINE> <INDENT> if isinstance(obj, ast.AST): <NEW_LINE> <INDENT> return obj <NEW_LINE> <DEDENT> elif isinstance(obj, (list, dict, tuple, int, float, basestring)): <NEW_LINE> <INDENT> return escape_ast(obj) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise TypeError( "Cannot wrap objects of type %s into an AST" % (type(obj),)) | Wrap an object in an AST | 625941b6287bf620b61d388e |
def process_field(self, doc): <NEW_LINE> <INDENT> res = [] <NEW_LINE> field = doc["research_field"] <NEW_LINE> if field: <NEW_LINE> <INDENT> fields = re.split("[;]",field) <NEW_LINE> fields = [i.strip() for i in fields] <NEW_LINE> res.extend(fields) <NEW_LINE> <DEDENT> return res | Converts the research field into a list | 625941b6462c4b4f79d1d4ee |
def firstn(n): <NEW_LINE> <INDENT> num, nums = 0, [] <NEW_LINE> while num < n: <NEW_LINE> <INDENT> nums.append(num) <NEW_LINE> num += 1 <NEW_LINE> <DEDENT> return nums | return first n integers, builds the full list in memory | 625941b66fb2d068a760eebf |
def __init__(self, path, database, collection): <NEW_LINE> <INDENT> MongoFrame.__init__(self, path, database, collection) | Args:
path (str) path to a mongodb directory
database (str) name of a pymongo database
collection (str) name of a pymongo collection | 625941b66aa9bd52df036bc0 |
@register.filter <NEW_LINE> def get_range(value, min_value=1): <NEW_LINE> <INDENT> return range(min_value, value) | Filter - returns a list containing range made from given value
Usage (in template):
<ul>{% for i in 3|get_range %}
<li>{{ i }}. Do something</li>
{% endfor %}</ul>
Results with the HTML:
<ul>
<li>0. Do something</li>
<li>1. Do something</li>
<li>2. Do something</li>
</ul>
Instead of 3 one may use the variable set in the views | 625941b6293b9510aa2c30b7 |
def _on_term_popup_menu(self, widget, event=None): <NEW_LINE> <INDENT> if ((not event or event.type != Gdk.EventType.BUTTON_RELEASE or event.button != 3)): <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD) <NEW_LINE> if clipboard.wait_is_uris_available(): <NEW_LINE> <INDENT> self.menu_item_pastefilenames.show() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.menu_item_pastefilenames.hide() <NEW_LINE> <DEDENT> self.menu.popup(None, None, None, None, 3, 0) | Show context menu on right-clicks. | 625941b650485f2cf553cbb7 |
def _q(self, query, filters=[], page=0, page_size=None, order=[]): <NEW_LINE> <INDENT> if len(filters): <NEW_LINE> <INDENT> query = query.filter(*filters) <NEW_LINE> <DEDENT> if len(order): <NEW_LINE> <INDENT> query = query.order_by(*order) <NEW_LINE> <DEDENT> if page_size is not None and page_size > 0: <NEW_LINE> <INDENT> query = query.limit(page_size) <NEW_LINE> <DEDENT> if page > 0 and page_size is not None and page_size > 0: <NEW_LINE> <INDENT> query = query.offset(page*page_size) <NEW_LINE> <DEDENT> return query | from https://stackoverflow.com/questions/13258934/applying-limit-and-offset-to-all-queries-in-sqlalchemy/25943188#25943188
Do the sorting, limiting, filtering and paging in the database instead of on the web server, helper method for that
:rtype: :class:`sandman2.model.Model` | 625941b64527f215b584c279 |
def send_IM(self, pass_rate, total, _pass, failure, error, merge_report_file_name): <NEW_LINE> <INDENT> LogUtil.info(u"[IM SEND BEGIN] 开始发送IM 群通知。。。") <NEW_LINE> IM_body, pass_rate = IM_report.format_report_for_temporary_test(pass_rate=pass_rate, total_case=total, pass_case=_pass, failure_case=failure, error_case=error,) <NEW_LINE> custom_IM_body = self._custom_IM_body() <NEW_LINE> final_IM_body = u"{}\n详细报告 :<a href=http://{}/auto_reports/{}/{}>Report Link</a> \n{}".format(IM_body, tomcat_server, self.project_name, merge_report_file_name, custom_IM_body) <NEW_LINE> title_time = time.strftime("%Y-%m-%d_%H:%M", time.localtime()) <NEW_LINE> msg_title = u"{} 自动化测试报告_{} - 通过率 : {} %".format(self.project_name.upper(), title_time, pass_rate) <NEW_LINE> self.IM_client.send_msg_to_group(msg_title, final_IM_body, self.group_id) <NEW_LINE> for receiver in self.report_receiver_list: <NEW_LINE> <INDENT> self.IM_client.send_msg_to_person(msg_title, final_IM_body, receiver) | Send the IM report to the IM group.
To customize the content, implement the _custom_IM_body method. | 625941b6b57a9660fec3369e |
def number_of_items(category_id): <NEW_LINE> <INDENT> return db_session.query(Item).filter_by(category_id=category_id).count() | Utility Method to get number of items in a particular category | 625941b6f9cc0f698b140424 |
def save(self, trial, storage=Checkpoint.DISK): <NEW_LINE> <INDENT> raise NotImplementedError("Subclasses of TrialExecutor must provide " "save() method") | Saves training state of this trial to a checkpoint.
Args:
trial (Trial): The state of this trial to be saved.
storage (str): Where to store the checkpoint. Defaults to DISK.
Return:
A Python object if storage==Checkpoint.MEMORY otherwise
a path to the checkpoint. | 625941b6f548e778e58cd39a |
def __init__(self, url, html, headers): <NEW_LINE> <INDENT> self.html = html <NEW_LINE> self.url = url <NEW_LINE> self.headers = headers <NEW_LINE> self.parsed_html = soup = BeautifulSoup(self.html, "html.parser") <NEW_LINE> self.scripts = [ script['src'] for script in soup.findAll('script', src=True) ] <NEW_LINE> self.meta = { meta['name'].lower(): meta['content'] for meta in soup.findAll( 'meta', attrs=dict(name=True, content=True)) } <NEW_LINE> self.title = soup.title.string if soup.title else 'None' <NEW_LINE> wappalyzer = Wappalyzer() <NEW_LINE> self.apps = wappalyzer.analyze(self) <NEW_LINE> self.result = ';'.join(self.apps) | Initialize a new WebPage object.
Parameters
----------
url : str
The web page URL.
html : str
The web page content (HTML)
headers : dict
The HTTP response headers | 625941b67cff6e4e811177a4 |
def read_raw_temp(self): <NEW_LINE> <INDENT> return self._sensor.read_raw_temp() | Reads the raw (uncompensated) temperature from the sensor. | 625941b61f5feb6acb0c4974 |
@database_cli.command() <NEW_LINE> @click.option('--config', help='Path to a folder with the db configuration') <NEW_LINE> @with_appcontext <NEW_LINE> def create(config): <NEW_LINE> <INDENT> if config: <NEW_LINE> <INDENT> path = Path(config) <NEW_LINE> if not path.exists(): <NEW_LINE> <INDENT> raise click.BadParameter( 'The specified config path does not exist.' ) <NEW_LINE> <DEDENT> elif not path.is_dir(): <NEW_LINE> <INDENT> raise click.BadParameter( 'The specified config path is not a directory.' ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> dirs = set(str(x.name) for x in path.iterdir() if x.is_dir()) <NEW_LINE> if not set(['predicates', 'pids', 'claimants']).issubset(dirs): <NEW_LINE> <INDENT> raise click.BadParameter( 'The specified directory must contain three folders: ' 'predicates, pids and claimants.' ) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> db.create_all() <NEW_LINE> load_all_predicates(config) <NEW_LINE> load_all_pids(config) <NEW_LINE> load_all_claimants(config) <NEW_LINE> click.echo('Database initialisation completed.') | Create database and populate it with basic data.
The database will be populated with the predicates, persistent identifiers
and claimants that are defined in `tests/myclaimstore/config`. An
alternative directory can be specified thanks to the argument `--config`. | 625941b63c8af77a43ae35bc |
def _check_request(self): <NEW_LINE> <INDENT> todo = [] <NEW_LINE> for task in self._postpone_request: <NEW_LINE> <INDENT> if self.task_queue[task['project']].is_processing(task['taskid']): <NEW_LINE> <INDENT> todo.append(task) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.on_request(task) <NEW_LINE> <DEDENT> <DEDENT> self._postpone_request = todo <NEW_LINE> tasks = {} <NEW_LINE> while len(tasks) < self.LOOP_LIMIT: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> task = self.newtask_queue.get_nowait() <NEW_LINE> <DEDENT> except Queue.Empty: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> if isinstance(task, list): <NEW_LINE> <INDENT> _tasks = task <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> _tasks = (task, ) <NEW_LINE> <DEDENT> for task in _tasks: <NEW_LINE> <INDENT> if not self.task_verify(task): <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> if task['taskid'] in self.task_queue[task['project']]: <NEW_LINE> <INDENT> if not task.get('schedule', {}).get('force_update', False): <NEW_LINE> <INDENT> logger.debug('ignore newtask %(project)s:%(taskid)s %(url)s', task) <NEW_LINE> continue <NEW_LINE> <DEDENT> <DEDENT> if task['taskid'] in tasks: <NEW_LINE> <INDENT> if not task.get('schedule', {}).get('force_update', False): <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> <DEDENT> tasks[task['taskid']] = task <NEW_LINE> <DEDENT> <DEDENT> for task in itervalues(tasks): <NEW_LINE> <INDENT> self.on_request(task) <NEW_LINE> <DEDENT> return len(tasks) | Check new task queue | 625941b6236d856c2ad445fd |
def edge(self, input_port, state): <NEW_LINE> <INDENT> assert input_port == 0, "Interconnect does not have multiple inputs." <NEW_LINE> self.new_state = state <NEW_LINE> return [] | Registers a rising or falling edge on the interconnect.
:param input_port: Index of the input
:param state: Value of the input (True/False) at time `when` | 625941b6b7558d58953c4d3a |
def check_remote_version(client, local_version): <NEW_LINE> <INDENT> env = client.forward(u'env', u'version')['result'] <NEW_LINE> remote_version = parse_version(env['version']) <NEW_LINE> if remote_version > local_version: <NEW_LINE> <INDENT> raise ScriptError( "Cannot install replica of a server of higher version ({}) than" "the local version ({})".format(remote_version, local_version)) | Verify remote server's version is not higher than this server's version
:param client: RPC client
:param local_version: API version of local server
:raises: ScriptError: if the checks fails | 625941b68a43f66fc4b53e88 |
def longest_contiguous_ones(x): <NEW_LINE> <INDENT> x = np.ravel(x) <NEW_LINE> if len(x) == 0: <NEW_LINE> <INDENT> return np.array([]) <NEW_LINE> <DEDENT> ind = (x == 0).nonzero()[0] <NEW_LINE> if len(ind) == 0: <NEW_LINE> <INDENT> return np.arange(len(x)) <NEW_LINE> <DEDENT> if len(ind) == len(x): <NEW_LINE> <INDENT> return np.array([]) <NEW_LINE> <DEDENT> y = np.zeros((len(x)+2,), x.dtype) <NEW_LINE> y[1:-1] = x <NEW_LINE> dif = np.diff(y) <NEW_LINE> up = (dif == 1).nonzero()[0] <NEW_LINE> dn = (dif == -1).nonzero()[0] <NEW_LINE> i = (dn-up == max(dn - up)).nonzero()[0][0] <NEW_LINE> ind = np.arange(up[i], dn[i]) <NEW_LINE> return ind | Return the indices of the longest stretch of contiguous ones in *x*,
assuming *x* is a vector of zeros and ones. If there are two
equally long stretches, pick the first. | 625941b6090684286d50eafe |
def testExpectedAttributes(self): <NEW_LINE> <INDENT> read = Read('id', 'atcgatcgatcg') <NEW_LINE> translated = TranslatedRead(read, 'IRDS', 0) <NEW_LINE> self.assertEqual('IRDS', translated.sequence) <NEW_LINE> self.assertEqual(0, translated.frame) | A TranslatedRead instance must have the expected attributes. | 625941b69b70327d1c4e0bf2 |
def init_weights(shape): <NEW_LINE> <INDENT> return tf.Variable(tf.truncated_normal(shape, 0, 0.05)) | Input: shape - this is the shape of a matrix used to represent weights for the arbitrary layer
Output: weights randomly generated with size = shape | 625941b64a966d76dd550e2a |
def test_npn_advertise_error(self): <NEW_LINE> <INDENT> select_args = [] <NEW_LINE> def advertise(conn): <NEW_LINE> <INDENT> raise TypeError <NEW_LINE> <DEDENT> def select(conn, options): <NEW_LINE> <INDENT> select_args.append((conn, options)) <NEW_LINE> return b'' <NEW_LINE> <DEDENT> server_context = Context(TLSv1_METHOD) <NEW_LINE> server_context.set_npn_advertise_callback(advertise) <NEW_LINE> client_context = Context(TLSv1_METHOD) <NEW_LINE> client_context.set_npn_select_callback(select) <NEW_LINE> server_context.use_privatekey( load_privatekey(FILETYPE_PEM, server_key_pem)) <NEW_LINE> server_context.use_certificate( load_certificate(FILETYPE_PEM, server_cert_pem)) <NEW_LINE> server = Connection(server_context, None) <NEW_LINE> server.set_accept_state() <NEW_LINE> client = Connection(client_context, None) <NEW_LINE> client.set_connect_state() <NEW_LINE> self.assertRaises(TypeError, self._interactInMemory, server, client) <NEW_LINE> self.assertEqual([], select_args) | Test that we can handle exceptions in the advertise callback. If
advertise fails no NPN is advertised to the client. | 625941b6ff9c53063f47c01c |
def is_threeofakind(hand): <NEW_LINE> <INDENT> count = 0 <NEW_LINE> newlist = sorted(new(hand)) <NEW_LINE> for i in range(len(newlist)-2): <NEW_LINE> <INDENT> if newlist[i] == newlist[i+1] == newlist[i+2]: <NEW_LINE> <INDENT> count += 1 <NEW_LINE> <DEDENT> <DEDENT> if count == 1: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> return False | A three of a kind has three cards of the same value and two different cards | 625941b667a9b606de4a7cdc |
@click.group() <NEW_LINE> def git() -> None: <NEW_LINE> <INDENT> pass | Run various `git` commands on the current *ssh auth dir*. | 625941b630dc7b7665901789 |
def __init__(self, resource, form=None): <NEW_LINE> <INDENT> self.resource = resource <NEW_LINE> self._form = form <NEW_LINE> self._config = DEFAULT | Args:
resource: the S3Resource
form: an S3SQLForm instance to override settings | 625941b62ae34c7f2600cf50 |
def validate_email(self): <NEW_LINE> <INDENT> return self._client.call_api( method="post", path="/v3/accounts/{account_id}/users/{user_id}/validate-email", content_type="application/json", path_params={"account_id": self._account_id.to_api(), "user_id": self._id.to_api()}, unpack=self, ) | Validate the user email.
`REST API Documentation <https://os.mbed.com/search/?q=Service+API+References+/v3/accounts/{account_id}/users/{user_id}/validate-email>`_.
:rtype: SubtenantUser | 625941b6c4546d3d9de7284f |
def onDrawGrid(self, *args): <NEW_LINE> <INDENT> self.draw_grid = conf.get("drawGrid") | Checks the configuration / preferences to see if the board
grid should be displayed. | 625941b631939e2706e4cc8f |
def normal(train, test): <NEW_LINE> <INDENT> model = Normalizer() <NEW_LINE> model.fit(train) <NEW_LINE> prod = model.transform(test) <NEW_LINE> print(prod) | Normalization | 625941b6aad79263cf390859 |
def serialize(self, buff): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> length = len(self.ints) <NEW_LINE> buff.write(_struct_I.pack(length)) <NEW_LINE> pattern = '<%si'%length <NEW_LINE> buff.write(struct.pack(pattern, *self.ints)) <NEW_LINE> length = len(self.floats) <NEW_LINE> buff.write(_struct_I.pack(length)) <NEW_LINE> pattern = '<%sf'%length <NEW_LINE> buff.write(struct.pack(pattern, *self.floats)) <NEW_LINE> length = len(self.strings) <NEW_LINE> buff.write(_struct_I.pack(length)) <NEW_LINE> for val1 in self.strings: <NEW_LINE> <INDENT> length = len(val1) <NEW_LINE> if python3 or type(val1) == unicode: <NEW_LINE> <INDENT> val1 = val1.encode('utf-8') <NEW_LINE> length = len(val1) <NEW_LINE> <DEDENT> buff.write(struct.pack('<I%ss'%length, length, val1)) <NEW_LINE> <DEDENT> <DEDENT> except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))) <NEW_LINE> except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self))))) | serialize message into buffer
:param buff: buffer, ``StringIO`` | 625941b6e64d504609d7465f |
def applyF_filterG(L, f, g): <NEW_LINE> <INDENT> for elem in L: <NEW_LINE> <INDENT> if g(f(elem))==False: <NEW_LINE> <INDENT> print(g(f(elem))) <NEW_LINE> L.remove(elem) <NEW_LINE> print(L) <NEW_LINE> <DEDENT> <DEDENT> if L==[]: <NEW_LINE> <INDENT> return -1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> L.sort() <NEW_LINE> return L[len(L)-1] | Assumes L is a list of integers
Assume functions f and g are defined for you.
f takes in an integer, applies a function, returns another integer
g takes in an integer, applies a Boolean function,
returns either True or False
Mutates L such that, for each element i originally in L, L contains
i if g(f(i)) returns True, and no other elements
Returns the largest element in the mutated L or -1 if the list is empty | 625941b6b5575c28eb68de1c |
def set_T(self, u0): <NEW_LINE> <INDENT> num = np.min((self.dx,self.dy)) <NEW_LINE> denom = 2*torch.max(np.sqrt(self.dfdu(u0)**2 + self.dgdu(u0)**2)) <NEW_LINE> self.T = float(np.random.rand(1)*float(num/denom)) <NEW_LINE> del num, denom | Courant coefficient = 0.5 | 625941b6a17c0f6771cbde73 |
def max_list_iter(int_list): <NEW_LINE> <INDENT> if int_list == None: <NEW_LINE> <INDENT> raise ValueError <NEW_LINE> <DEDENT> max_int = None <NEW_LINE> for i in range(len(int_list)): <NEW_LINE> <INDENT> if i == 0 or int_list[i] > max_int: <NEW_LINE> <INDENT> max_int = int_list[i] <NEW_LINE> <DEDENT> <DEDENT> return max_int | finds the max of a list of numbers and returns the value (not the index)
If int_list is empty, returns None. If list is None, raises ValueError | 625941b6e5267d203edcdac0 |
def test_parse_samplesheet(self): <NEW_LINE> <INDENT> with self.assertRaises(IOError): <NEW_LINE> <INDENT> HiSeqRun.parse_samplesheet(os.path.join(self.rootdir,'non-existing-samplesheet')) <NEW_LINE> <DEDENT> sdata = td.generate_samplesheet_data() <NEW_LINE> samplesheet = os.path.join(self.rootdir,'SampleSheet.csv') <NEW_LINE> HiSeqRun.write_samplesheet(sdata,samplesheet) <NEW_LINE> with open(samplesheet) as fh: <NEW_LINE> <INDENT> self.assertListEqual(HiSeqRun._samplesheet_header(), fh.next().strip().split(","), "Written header does not match expected header") <NEW_LINE> for entry in sdata: <NEW_LINE> <INDENT> self.assertListEqual([str(e) for e in entry], fh.next().strip().split(","), "Written data row does not match entry in generated samplesheet") <NEW_LINE> <DEDENT> with self.assertRaises(StopIteration): <NEW_LINE> <INDENT> fh.next() <NEW_LINE> <DEDENT> <DEDENT> data = HiSeqRun.parse_samplesheet(samplesheet) <NEW_LINE> self.assertEqual(len(sdata), len(data), "Number of parsed entries does not match number of generated entries") <NEW_LINE> for d in data: <NEW_LINE> <INDENT> self.assertListEqual([str(e) for e in sdata.pop(0)], [d[col] for col in HiSeqRun._samplesheet_header()], "Parsed data row does not match entry in generated samplesheet") <NEW_LINE> <DEDENT> lanes = list(set([d["Lane"] for d in data])) <NEW_LINE> obs_lane_data = HiSeqRun.parse_samplesheet(samplesheet,lane=lanes[-1]) <NEW_LINE> exp_lane_data = [d for d in data if str(d["Lane"]) == str(lanes[-1])] <NEW_LINE> self.assertListEqual(sorted(obs_lane_data), sorted(exp_lane_data), "Parsed data row does not match entry in generated samplesheet") | Write and parse a csv-file
| 625941b61d351010ab85593d |
def _get_orders(self, stra): <NEW_LINE> <INDENT> return [] | Get the orders related to the strategy; returns [] | 625941b6a934411ee37514ba |
def test_03_add_queue(self): <NEW_LINE> <INDENT> with self.assertLogs(logger=logger, level='INFO'): <NEW_LINE> <INDENT> add_queue(self.test_video_id) <NEW_LINE> res = add_queue(self.test_video_id) <NEW_LINE> self.assertEqual(res, False) | Assert duplicate video id logs an info
message and returns false | 625941b626238365f5f0ec88 |
def updata_tel(self,tel,file_name,sheet_name): <NEW_LINE> <INDENT> wb=load_workbook(file_name) <NEW_LINE> sheet=wb[sheet_name] <NEW_LINE> sheet.cell(2,1).value=tel <NEW_LINE> wb.save(file_name) | Update the phone number in the Excel file | 625941b6e1aae11d1e749ad2 |
def __cmp__(self, other): <NEW_LINE> <INDENT> return self.dgram == other.dgram | Compare two bundles
Args:
other (OSCBundle): other bundle to compare | 625941b6aad79263cf39085a |
def test_uninstall(sp_dir, conda_pth, request): <NEW_LINE> <INDENT> if not exists(conda_pth): <NEW_LINE> <INDENT> for pth in _path_in_dev_mode: <NEW_LINE> <INDENT> write_to_conda_pth(sp_dir, pth) <NEW_LINE> <DEDENT> <DEDENT> for to_rm, exp_num_pths in _torm_and_num_after_uninstall: <NEW_LINE> <INDENT> _uninstall(sp_dir, to_rm) <NEW_LINE> assert exists(conda_pth) <NEW_LINE> with open(conda_pth, 'r') as f: <NEW_LINE> <INDENT> lines = f.readlines() <NEW_LINE> assert to_rm + '\n' not in lines <NEW_LINE> assert len(lines) == exp_num_pths | `conda develop --uninstall pkg_path` invokes uninstall() to remove path
from conda.pth - this is a unit test for uninstall
It also includes a cleanup function that deletes the conda.pth file
:param str sp_dir: path to site-packages directory returned by fixture
:param str conda_pth: path to conda.pth returned by fixture | 625941b660cbc95b062c6368 |
def _maybe_pubsub_notify_via_tq(result_summary, request): <NEW_LINE> <INDENT> assert ndb.in_transaction() <NEW_LINE> assert isinstance( result_summary, task_result.TaskResultSummary), result_summary <NEW_LINE> assert isinstance(request, task_request.TaskRequest), request <NEW_LINE> if request.pubsub_topic: <NEW_LINE> <INDENT> task_id = task_pack.pack_result_summary_key(result_summary.key) <NEW_LINE> ok = utils.enqueue_task( url='/internal/taskqueue/pubsub/%s' % task_id, queue_name='pubsub', transactional=True, payload=utils.encode_to_json({ 'task_id': task_id, 'topic': request.pubsub_topic, 'auth_token': request.pubsub_auth_token, 'userdata': request.pubsub_userdata, })) <NEW_LINE> if not ok: <NEW_LINE> <INDENT> raise datastore_utils.CommitError('Failed to enqueue task') | Examines result_summary and enqueues a task to send PubSub message.
Must be called within a transaction.
Raises CommitError on errors (to abort the transaction). | 625941b6e1aae11d1e749ad3 |
def json_string(input_data, flag): <NEW_LINE> <INDENT> if flag == "true": <NEW_LINE> <INDENT> input_data = json.dumps(input_data, ensure_ascii=False, separators=(',', ':')) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> input_data = json.dumps(input_data, ensure_ascii=False) <NEW_LINE> <DEDENT> return input_data | Less time consuming
:param input_data:
:return: | 625941b6ad47b63b2c509da9 |
@home.route('/tasks/edit/<int:id>', methods=['GET', 'POST']) <NEW_LINE> @login_required <NEW_LINE> def edit_task(id): <NEW_LINE> <INDENT> add_task = False <NEW_LINE> task = Task.query.get_or_404(id) <NEW_LINE> form = TaskForm(obj=task) <NEW_LINE> if form.validate_on_submit(): <NEW_LINE> <INDENT> task.name = form.name.data <NEW_LINE> task.description = form.description.data <NEW_LINE> db.session.add(task) <NEW_LINE> db.session.commit() <NEW_LINE> flash('You have successfully edited the task.') <NEW_LINE> return redirect(url_for('home.list_tasks', id =id)) <NEW_LINE> <DEDENT> form.taskdescription.data = task.description <NEW_LINE> form.taskname.data = task.taskname <NEW_LINE> return render_template('home/tasks/addedittask.html', add_task=add_task, form=form, title="Edit Task") | Edit a task | 625941b623e79379d52ee387 |
def matrix_scalar_multiply(matrix, scalar): <NEW_LINE> <INDENT> return [[each*scalar for each in matrix[i]] for i in range(len(matrix))] | [[a b] * Z = [[a*Z b*Z]
[c d]] [c*Z d*Z]]
Matrix * Scalar = Matrix | 625941b6bde94217f3682c1c |
def record_log(self,action_obj,action_operation,host_id,trigger_data): <NEW_LINE> <INDENT> models.EventLog.objects.create( event_type = 0, host_id = host_id, trigger_id = trigger_data.get('trigger_id'), log = trigger_data ) | record alert log into DB | 625941b6507cdc57c6306af2 |
def longestPalindrome(self, s): <NEW_LINE> <INDENT> freq = defaultdict(int) <NEW_LINE> for c in s: <NEW_LINE> <INDENT> freq[c] += 1 <NEW_LINE> <DEDENT> count = 0 <NEW_LINE> hasOdd = False <NEW_LINE> for k, v in freq.items(): <NEW_LINE> <INDENT> if v % 2 != 0: <NEW_LINE> <INDENT> if hasOdd: <NEW_LINE> <INDENT> count -= 1 <NEW_LINE> <DEDENT> hasOdd = True <NEW_LINE> <DEDENT> count += v <NEW_LINE> <DEDENT> return count | :type s: str
:rtype: int | 625941b6377c676e91271fca |
def post(self): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> user = User.query.filter_by(email=request.data['email']).first() <NEW_LINE> if user and user.password_is_valid(request.data['password']): <NEW_LINE> <INDENT> access_token = user.generate_token(user.user_id) <NEW_LINE> if access_token: <NEW_LINE> <INDENT> response = { 'message': 'You logged in successfully', 'access_token': access_token.decode() } <NEW_LINE> return make_response(jsonify(response)), 200 <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> response = { 'message': 'Invalid email or password. Please try again.' } <NEW_LINE> return make_response(jsonify(response)), 200 <NEW_LINE> <DEDENT> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> response = { 'message': str(e) } <NEW_LINE> return make_response(jsonify(response)), 500 | Handle POST requests for URL-- auth/login | 625941b68e71fb1e9831d5cd |
def _adjustDate(self, candidate=None, fixcentury=False, force4digit_year=False): <NEW_LINE> <INDENT> if candidate is None: text = self._GetValue() <NEW_LINE> else: text = candidate <NEW_LINE> if self._datestyle == "YMD": <NEW_LINE> <INDENT> year_field = 0 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> year_field = 2 <NEW_LINE> <DEDENT> year = _getYear( text, self._datestyle).replace(self._fields[year_field]._fillChar,"") <NEW_LINE> month = _getMonth( text, self._datestyle) <NEW_LINE> day = _getDay( text, self._datestyle) <NEW_LINE> yearVal = None <NEW_LINE> yearstart = self._dateExtent - 4 <NEW_LINE> if( len(year) < 4 and (fixcentury or force4digit_year or (self._GetInsertionPoint() > yearstart+1 and text[yearstart+2] == ' ') or (self._GetInsertionPoint() > yearstart+2 and text[yearstart+3] == ' ') ) ): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> yearVal = int(year) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> year = text[yearstart:self._dateExtent] <NEW_LINE> <DEDENT> <DEDENT> if len(year) < 4 and yearVal: <NEW_LINE> <INDENT> if len(year) == 2: <NEW_LINE> <INDENT> now = wx.DateTime.Now() <NEW_LINE> century = (now.GetYear() /100) * 100 <NEW_LINE> twodig_year = now.GetYear() - century <NEW_LINE> if abs(yearVal - twodig_year) > 50: <NEW_LINE> <INDENT> yearVal = (century - 100) + yearVal <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> yearVal = century + yearVal <NEW_LINE> <DEDENT> year = str( yearVal ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> year = "%04d" % yearVal <NEW_LINE> <DEDENT> if self._4digityear or force4digit_year: <NEW_LINE> <INDENT> text = _makeDate(year, month, day, self._datestyle, text) + text[self._dateExtent:] <NEW_LINE> <DEDENT> <DEDENT> return text | 'Fixes' a date control, expanding the year if it can.
Applies various self-formatting options. | 625941b6d268445f265b4c94 |
@click.command(name='clone-set') <NEW_LINE> @click.option('--name', required=True, prompt=True, type=click.Choice(config.get_set_names())) <NEW_LINE> def command(name): <NEW_LINE> <INDENT> set_ = config.get_set_or_die(name) <NEW_LINE> set_.clone() | Clones a specific set | 625941b67b25080760e3927a |
def sentence_pointers(): <NEW_LINE> <INDENT> return list(set(sent['sentence'] for sent in psql_query("SELECT sentence FROM sentence_to_topic;"))) | Collects the locations of the sentences from the database. | 625941b67c178a314d6ef277 |
def file_elem(file_id=None, admid_elements=None, loctype=None, xlink_href=None, xlink_type=None, groupid=None, use=None): <NEW_LINE> <INDENT> _file = _element('file') <NEW_LINE> _file.set('ID', decode_utf8(file_id)) <NEW_LINE> admids = ' '.join([decode_utf8(a) for a in admid_elements]) <NEW_LINE> _file.set('ADMID', decode_utf8(admids)) <NEW_LINE> if groupid: <NEW_LINE> <INDENT> _file.set('GROUPID', decode_utf8(groupid)) <NEW_LINE> <DEDENT> if use: <NEW_LINE> <INDENT> _file.set('USE', decode_utf8(use)) <NEW_LINE> <DEDENT> _flocat = _element('FLocat', ns={'xlink': XLINK_NS}) <NEW_LINE> _flocat.set('LOCTYPE', decode_utf8(loctype)) <NEW_LINE> _flocat.set(xlink_ns('href'), decode_utf8(xlink_href)) <NEW_LINE> _flocat.set(xlink_ns('type'), decode_utf8(xlink_type)) <NEW_LINE> _file.append(_flocat) <NEW_LINE> return _file | Return the file element | 625941b6d164cc6175782b6d |
def run(self): <NEW_LINE> <INDENT> while self.workers: <NEW_LINE> <INDENT> p = self.queue.get() <NEW_LINE> if p is None: <NEW_LINE> <INDENT> self.workers -= 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.output.append(p) <NEW_LINE> <DEDENT> <DEDENT> print(len("".join(c for (i, c) in (sorted if self.sorting else identity)(self.output)))) <NEW_LINE> print("Output thread terminating") | Extract items from the output queue and print until all done. | 625941b6adb09d7d5db6c5b3 |
def register_handler(self, channel, handler): <NEW_LINE> <INDENT> if channel in self.registered_channels: <NEW_LINE> <INDENT> self.registered_channels[channel].append(handler) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.registered_channels[channel] = [handler] | Registers ``handler`` with given ``channel``
:param channel: Name of channel
:param handler: Coroutine that will handle notifications from
``channel``
:returns: None | 625941b60383005118ecf404 |
def birth_years(df): <NEW_LINE> <INDENT> earliest_born = int(df['birth_year'].min()) <NEW_LINE> latest_born = int(df['birth_year'].max()) <NEW_LINE> pop_birth_year = int(df['birth_year'].mode()) <NEW_LINE> print('The oldest users are born in {}.\nThe youngest users are born in {}.' '\nThe most popular birth year is {}.'.format(earliest_born, latest_born, pop_birth_year)) | Finds and prints the earliest (i.e. oldest user), most recent (i.e.
youngest user), and most popular birth years. | 625941b6b830903b967e9737 |
def pack_as_nest_dict(flat_d, nest_d): <NEW_LINE> <INDENT> nest_out_d = {} <NEW_LINE> for k, v in nest_d.items(): <NEW_LINE> <INDENT> if isinstance(v, dict): <NEW_LINE> <INDENT> v_flat = flatten_nest_dict(v) <NEW_LINE> sub_d = { k2: flat_d.pop("{}/{}".format(k, k2)) for k2, _ in v_flat.items() } <NEW_LINE> nest_out_d[k] = pack_as_nest_dict(sub_d, v) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> nest_out_d[k] = flat_d.pop(k) <NEW_LINE> <DEDENT> <DEDENT> if flat_d: <NEW_LINE> <INDENT> raise ValueError( "Flat dict strucure do not match the nested dict. Extra keys: " "{}".format(list(flat_d.keys()))) <NEW_LINE> <DEDENT> return nest_out_d | Pack a 1-lvl dict into a nested dict with same structure as `nest_d`. | 625941b6d18da76e235322f1 |
def remove_handler(self, handler, group=DEFAULT_GROUP): <NEW_LINE> <INDENT> if handler in self.handlers[group]: <NEW_LINE> <INDENT> self.handlers[group].remove(handler) <NEW_LINE> if not self.handlers[group]: <NEW_LINE> <INDENT> del self.handlers[group] <NEW_LINE> self.groups.remove(group) | Remove a handler from the specified group.
Args:
handler (:class:`viber.ext.Handler`): A Handler instance.
group (:obj:`object`, optional): The group identifier. Default is 0. | 625941b6a8370b77170526c0 |
def get_contents(url): <NEW_LINE> <INDENT> bf = visit_web(url) <NEW_LINE> content_div = bf.find('div', class_='secondLevelWrap borderE6') <NEW_LINE> content_kind = content_div.find_all('li') <NEW_LINE> content_url = [i.find('a').get('href') for i in content_kind[1: -1]] <NEW_LINE> name = [i.string for i in content_kind[1: -1]] <NEW_LINE> more_kind = content_kind[-1].find_all('a') <NEW_LINE> more_url = [j.get('href') for j in more_kind[1:]] <NEW_LINE> more_name = [j.string for j in more_kind[1:]] <NEW_LINE> content_url.extend(more_url) <NEW_LINE> name.extend(more_name) <NEW_LINE> result = {'url': content_url, 'name': name} <NEW_LINE> result = pd.DataFrame(result) <NEW_LINE> result.to_csv('yiou_content.csv', encoding='utf-8', index=False) | Get the URLs of the site's categories | 625941b6a8370b77170526c1 |
def gen_traindata_from_all(): <NEW_LINE> <INDENT> fc = FeatureContainer() <NEW_LINE> a_colls = ["19550517_a", "19551147_a", "19561087_a", "19553298_a"] <NEW_LINE> try: <NEW_LINE> <INDENT> for a_coll in a_colls: <NEW_LINE> <INDENT> count = 0 <NEW_LINE> print(a_coll) <NEW_LINE> tid = a_coll[:-2] <NEW_LINE> a_collection = db.get_collection(a_coll) <NEW_LINE> for adoc in a_collection.find({}, {'aid': 1}).sort([('aid', 1)]): <NEW_LINE> <INDENT> count += 1 <NEW_LINE> aid = adoc['aid'] <NEW_LINE> answer = StaticAnswer(tid, aid) <NEW_LINE> answer.load_from_raw() <NEW_LINE> answer.build_cand_edges() <NEW_LINE> fc.append(answer.gen_features(), answer.gen_target()) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> except: <NEW_LINE> <INDENT> print(count) <NEW_LINE> raise <NEW_LINE> <DEDENT> fc.dump(pickle_filename) <NEW_LINE> print(len(fc.features)) | Generate the features and samples for all data in the database. Used to build edges and features from scratch, so it only needs to be called once. | 625941b6627d3e7fe0d68c6e |
def test_C4(self): <NEW_LINE> <INDENT> G = nx.cycle_graph(4) <NEW_LINE> b = nx.edge_betweenness_centrality(G, weight=None, normalized=True) <NEW_LINE> b_answer = {(0, 1): 2, (0, 3): 2, (1, 2): 2, (2, 3): 2} <NEW_LINE> for n in sorted(G.edges()): <NEW_LINE> <INDENT> assert_almost_equal(b[n], b_answer[n] / 6.0) | Edge betweenness centrality: C4 | 625941b62eb69b55b151c6ca |
def valuerefs(self): <NEW_LINE> <INDENT> if self._pending_removals: <NEW_LINE> <INDENT> self._commit_removals() <NEW_LINE> <DEDENT> return list(self.data.values()) | Return a list of weak references to the values.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the values around longer than needed. | 625941b663d6d428bbe4430f |
def __init__(self, local_xbee_device, x64bit_addr=None, x16bit_addr=None, node_id=None): <NEW_LINE> <INDENT> if local_xbee_device.get_protocol() != XBeeProtocol.RAW_802_15_4: <NEW_LINE> <INDENT> raise XBeeException("Invalid protocol.") <NEW_LINE> <DEDENT> super().__init__(local_xbee_device, x64bit_addr, x16bit_addr, node_id=node_id) | Class constructor. Instantiates a new :class:`.RemoteXBeeDevice` with the provided parameters.
Args:
local_xbee_device (:class:`.XBeeDevice`): the local XBee device associated with the remote one.
x64bit_addr (:class:`.XBee64BitAddress`): the 64-bit address of the remote XBee device.
x16bit_addr (:class:`.XBee16BitAddress`): the 16-bit address of the remote XBee device.
node_id (String, optional): the node identifier of the remote XBee device. Optional.
Raises:
XBeeException: if the protocol of ``local_xbee_device`` is invalid.
All exceptions raised by :class:`.RemoteXBeeDevice` constructor.
.. seealso::
| :class:`RemoteXBeeDevice`
| :class:`XBee16BitAddress`
| :class:`XBee64BitAddress`
| :class:`XBeeDevice` | 625941b60a50d4780f666caf |
def testChecksum(self): <NEW_LINE> <INDENT> fname=tempfile.mktemp(prefix='fitsio-Checksum-',suffix='.fits') <NEW_LINE> try: <NEW_LINE> <INDENT> with fitsio.FITS(fname,'rw',clobber=True) as fits: <NEW_LINE> <INDENT> fits.write_table(self.data, header=self.keys, extname='mytable') <NEW_LINE> fits[1].write_checksum() <NEW_LINE> fits[1].verify_checksum() <NEW_LINE> <DEDENT> <DEDENT> finally: <NEW_LINE> <INDENT> if os.path.exists(fname): <NEW_LINE> <INDENT> os.remove(fname) | test that checksumming works | 625941b694891a1f4081b8c8 |
def parse_type(s): <NEW_LINE> <INDENT> return message.parse_type(s, TYPES) | Parses the string to the correct response type | 625941b6a4f1c619b28afe62 |
def __enter__(self): <NEW_LINE> <INDENT> self.in_with_block = True <NEW_LINE> self._handler.run_command() <NEW_LINE> self.process = self._handler.process <NEW_LINE> return self | Open sub-process at the start of a with block. | 625941b6eab8aa0e5d26d97e |
def get_configurations(self, obj): <NEW_LINE> <INDENT> configs = obj.configs.all() <NEW_LINE> serializer = SimpleExportConfigSerializer(configs, many=True, context={'request': self.context['request']}) <NEW_LINE> return serializer.data | Return the configurations selected for this export. | 625941b65fdd1c0f98dc0051 |
def test_403s(app): <NEW_LINE> <INDENT> for url in ('dashboard/', 'dashboard/test/1/', 'dashboard/abc/def/'): <NEW_LINE> <INDENT> rv = app.get(phase2_url + url) <NEW_LINE> assert rv.status_code == 403 <NEW_LINE> rv = app.get(phase2_url + url, headers={'Cookie': 'session=asdf'}) <NEW_LINE> assert rv.status_code == 403 | These should return 403 instead of 404. | 625941b6091ae35668666d86 |
def _match_artist(self, artist): <NEW_LINE> <INDENT> if self._media_index is None: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> matches = self._media_index.lookup(artist=' '.join(artist)) <NEW_LINE> return len(matches) > 0 | @see MusicService._match_artist() | 625941b6fb3f5b602dac34ae |
def numTriplets(self, nums1: List[int], nums2: List[int]) -> int: <NEW_LINE> <INDENT> d1 = collections.defaultdict(int) <NEW_LINE> d2 = collections.defaultdict(int) <NEW_LINE> for i in nums1: <NEW_LINE> <INDENT> d1[i * i] += 1 <NEW_LINE> <DEDENT> for i in nums2: <NEW_LINE> <INDENT> d2[i * i] += 1 <NEW_LINE> <DEDENT> res = 0 <NEW_LINE> for i in range(len(nums1) - 1): <NEW_LINE> <INDENT> for j in range(i + 1, len(nums1)): <NEW_LINE> <INDENT> multip = nums1[i] * nums1[j] <NEW_LINE> if multip in d2: <NEW_LINE> <INDENT> res += d2[multip] <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> for i in range(len(nums2) - 1): <NEW_LINE> <INDENT> for j in range(i + 1, len(nums2)): <NEW_LINE> <INDENT> multip = nums2[i] * nums2[j] <NEW_LINE> if multip in d1: <NEW_LINE> <INDENT> res += d1[multip] <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return res | Time O(n^2)
Space O(n) | 625941b61b99ca400220a8d0 |
def cluster_replace_nodes(base, cluster_id, nodes, expected_status='SUCCEEDED', wait_timeout=None): <NEW_LINE> <INDENT> params = { 'replace_nodes': { 'nodes': nodes } } <NEW_LINE> res = base.client.cluster_replace_nodes('clusters', cluster_id, params=params) <NEW_LINE> action_id = res['location'].split('/actions/')[1] <NEW_LINE> res = base.client.wait_for_status('actions', action_id, expected_status, wait_timeout) <NEW_LINE> return res['body']['status_reason'] | Utility function that replace nodes of cluster. | 625941b650812a4eaa59c146 |
def _assert_xrai_correctness(self, xrai_out, is_flatten_segments): <NEW_LINE> <INDENT> xrai_attribution_mask = xrai_out.attribution_mask <NEW_LINE> xrai_segments = xrai_out.segments <NEW_LINE> self.assertAlmostEqual(self.ig_mask.max(axis=2).sum(), xrai_attribution_mask.sum(), msg='The sum of IG attribution (max along the color ' 'axis) should be equal to the sum of XRAI ' 'attribution.') <NEW_LINE> self.assertTrue( np.array_equal(self.ig_bl_1_attr, xrai_out.ig_attribution[0]), msg='IG values returned by IG and returned to the client do not match.') <NEW_LINE> self.assertTrue( np.array_equal(self.ig_bl_2_attr, xrai_out.ig_attribution[1]), msg='IG values returned by IG and returned to the client do not match.') <NEW_LINE> segment_masks = [] <NEW_LINE> if is_flatten_segments: <NEW_LINE> <INDENT> first_segment_id = xrai_segments.min() <NEW_LINE> last_segment_id = xrai_segments.max() <NEW_LINE> self.assertEqual(1, first_segment_id, msg='The first segment should' ' be assigned value "1".') <NEW_LINE> for segment_id in range(first_segment_id, last_segment_id + 1): <NEW_LINE> <INDENT> segment_masks.append(xrai_segments == segment_id) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> segment_masks = xrai_segments <NEW_LINE> <DEDENT> prev_seg_attr = np.inf <NEW_LINE> for i, segment_mask in enumerate(segment_masks): <NEW_LINE> <INDENT> segment_id = i + 1 <NEW_LINE> segment_attr = xrai_attribution_mask[segment_mask] <NEW_LINE> self.assertGreater(segment_mask.sum(), 0, msg='Segment {} of {} has zero area.'.format( segment_id, len(segment_masks))) <NEW_LINE> self.assertEqual(segment_attr.min(), segment_attr.max(), 'All attribution values within a single segment should ' 'be equal.') <NEW_LINE> segment_attr = segment_attr.max() <NEW_LINE> self.assertAlmostEqual(self.ig_mask.max(axis=2)[segment_mask].sum(), xrai_attribution_mask[segment_mask].sum(), msg='The sum of the XRAI attribution within a ' 'segment should be equal to the sum of IG ' 'attribution within the same segment.') <NEW_LINE> if i < len(segment_masks) - 1: <NEW_LINE> <INDENT> self.assertLessEqual(segment_attr, prev_seg_attr, 'Pixel attributions of a segment with higher id ' 'should be lower than pixel attributions of a ' 'segment with a lower id. Segment {}'.format( segment_id)) <NEW_LINE> <DEDENT> prev_seg_attr = segment_attr | Performs general XRAIOutput verification that is applicable for all XRAI results. | 625941b6a79ad161976cbf65 |
def test_newsapi(self): <NEW_LINE> <INDENT> dataRetrieval = NewsRetrieval() <NEW_LINE> print(dataRetrieval) <NEW_LINE> self.assertTrue(dataRetrieval.check_connectivity_using_key()) | Check the connectivity using the newsapi key. | 625941b615baa723493c3d92 |