code: string (4 to 4.48k chars)
docstring: string (1 to 6.45k chars)
_id: string (24 chars)
def test_SetQueryParametersAction(self): <NEW_LINE> <INDENT> args = wc._parse_args(['--crawl-start-after', '2016-12-22T13:01:00', '--crawl-start-before', '2016-12-22T15:11:00', '-c']) <NEW_LINE> assert len(args.query_params) == 2 <NEW_LINE> assert args.query_params['crawl-start-after'] == '2016-12-22T13:01:00' <NEW_LINE> assert args.query_params['crawl-start-before'] == '2016-12-22T15:11:00'
Test that arguments passed with this action are in query_params.
625941b9009cb60464c63242
def release_zoom(self, event): <NEW_LINE> <INDENT> if event.button == 1: <NEW_LINE> <INDENT> super(NXNavigationToolbar, self).release_zoom(event) <NEW_LINE> self._update_release() <NEW_LINE> if self.plotview.ndim > 1 and self.plotview.label != "Projection": <NEW_LINE> <INDENT> self.plotview.tab_widget.setCurrentWidget(self.plotview.ptab) <NEW_LINE> <DEDENT> <DEDENT> elif event.button == 3: <NEW_LINE> <INDENT> if self.plotview.ndim == 1 or not event.inaxes: <NEW_LINE> <INDENT> self.home() <NEW_LINE> <DEDENT> elif (self.plotview.x and self.plotview.y and abs(event.x - self.plotview.x) < 5 and abs(event.y - self.plotview.y) < 5): <NEW_LINE> <INDENT> self.home(autoscale=False) <NEW_LINE> <DEDENT> elif self.plotview.xdata and self.plotview.ydata: <NEW_LINE> <INDENT> self.plotview.ptab.open_panel() <NEW_LINE> xmin, xmax = sorted([event.xdata, self.plotview.xdata]) <NEW_LINE> ymin, ymax = sorted([event.ydata, self.plotview.ydata]) <NEW_LINE> xp, yp = self.plotview.xaxis.dim, self.plotview.yaxis.dim <NEW_LINE> self.plotview.projection_panel.maxbox[xp].setValue(str(xmax)) <NEW_LINE> self.plotview.projection_panel.minbox[xp].setValue(str(xmin)) <NEW_LINE> self.plotview.projection_panel.maxbox[yp].setValue(str(ymax)) <NEW_LINE> self.plotview.projection_panel.minbox[yp].setValue(str(ymin)) <NEW_LINE> <DEDENT> <DEDENT> self.release(event)
The release mouse button callback in zoom to rect mode.
625941b94f88993c3716befa
def weight_to_image_summary(weight, name=None, max_images=1): <NEW_LINE> <INDENT> with tf.name_scope('weight_summary'): <NEW_LINE> <INDENT> v = weight <NEW_LINE> iy, ix, channels, depth = v.get_shape().as_list() <NEW_LINE> cy = math.ceil(math.sqrt(depth)) <NEW_LINE> cx = cy <NEW_LINE> v = tf.pad(v, ((0,0), (0,0), (0,0), (0,cy*cx-depth))) <NEW_LINE> v = tf.reshape(v, (iy, ix, channels, cy, cx)) <NEW_LINE> v = tf.transpose(v, (3,0,4,1,2)) <NEW_LINE> v = tf.reshape(v, (1, cy*iy, cx*ix, channels)) <NEW_LINE> return tf.image_summary(name, v, max_images=max_images)
Use for first convolutional layer.
625941b94e4d5625662d4263
def event_m10_31_6190(): <NEW_LINE> <INDENT> assert event_m10_31_x57(z11=10313018, z12=619000) <NEW_LINE> EndMachine() <NEW_LINE> Quit()
Pillars and chairs destroyed 19
625941b98a349b6b435e7ffb
def check_type(self, value): <NEW_LINE> <INDENT> if isinstance(value, str): <NEW_LINE> <INDENT> return "Str" <NEW_LINE> <DEDENT> elif isinstance(value, bool): <NEW_LINE> <INDENT> return "Bool" <NEW_LINE> <DEDENT> elif isinstance(value, float): <NEW_LINE> <INDENT> return "Float" <NEW_LINE> <DEDENT> elif isinstance(value, int): <NEW_LINE> <INDENT> return "Int" <NEW_LINE> <DEDENT> elif not self.strict: <NEW_LINE> <INDENT> return "Undef" <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError("Not writable value")
Check the type of a value and return the name of its data type as a string.
625941b9d8ef3951e32433c4
def q(self): <NEW_LINE> <INDENT> print("Exiting program") <NEW_LINE> print(self.ftp.quit()) <NEW_LINE> quit()
Exit the program and close the connection usage: q
625941b9711fe17d825421f9
def test_include_trailing_comma(): <NEW_LINE> <INDENT> test_output_grid = SortImports( file_contents=SHORT_IMPORT, multi_line_output=WrapModes.GRID, line_length=40, include_trailing_comma=True, ).output <NEW_LINE> assert test_output_grid == ( "from third_party import (lib1, lib2,\n" " lib3, lib4,)\n" ) <NEW_LINE> test_output_vertical = SortImports( file_contents=SHORT_IMPORT, multi_line_output=WrapModes.VERTICAL, line_length=40, include_trailing_comma=True, ).output <NEW_LINE> assert test_output_vertical == ( "from third_party import (lib1,\n" " lib2,\n" " lib3,\n" " lib4,)\n" ) <NEW_LINE> test_output_vertical_indent = SortImports( file_contents=SHORT_IMPORT, multi_line_output=WrapModes.VERTICAL_HANGING_INDENT, line_length=40, include_trailing_comma=True, ).output <NEW_LINE> assert test_output_vertical_indent == ( "from third_party import (\n" " lib1,\n" " lib2,\n" " lib3,\n" " lib4,\n" ")\n" ) <NEW_LINE> test_output_vertical_grid = SortImports( file_contents=SHORT_IMPORT, multi_line_output=WrapModes.VERTICAL_GRID, line_length=40, include_trailing_comma=True, ).output <NEW_LINE> assert test_output_vertical_grid == ( "from third_party import (\n" " lib1, lib2, lib3, lib4,)\n" ) <NEW_LINE> test_output_vertical_grid_grouped = SortImports( file_contents=SHORT_IMPORT, multi_line_output=WrapModes.VERTICAL_GRID_GROUPED, line_length=40, include_trailing_comma=True, ).output <NEW_LINE> assert test_output_vertical_grid_grouped == ( "from third_party import (\n" " lib1, lib2, lib3, lib4,\n" ")\n" )
Test for the include_trailing_comma option
625941b94e696a04525c92d7
def predict(self, X): <NEW_LINE> <INDENT> check_is_fitted(self, ['clfs_', 'meta_clf_']) <NEW_LINE> return self._do_predict(X, self.meta_clf_.predict)
Predict target values for X. Parameters ---------- X : numpy array, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. Returns ---------- labels : array-like, shape = [n_samples] Predicted class labels.
625941b9ab23a570cc250006
def shuffle_field(): <NEW_LINE> <INDENT> global field <NEW_LINE> global field_origin <NEW_LINE> field = list(range(1,16)) <NEW_LINE> field += EMPTY_MARK <NEW_LINE> field_origin = tuple(field) <NEW_LINE> random.shuffle(field)
This method is used to create a field at the very start of the game. :return: list with 16 randomly shuffled tiles, one of which is an empty space.
625941b91d351010ab8559a4
def split_dataset(dataset: numpy.ndarray, train_size, look_back) -> (numpy.ndarray, numpy.ndarray): <NEW_LINE> <INDENT> if not train_size > look_back: <NEW_LINE> <INDENT> raise ValueError('train_size must be larger than look_back') <NEW_LINE> <DEDENT> train, test = dataset[0:train_size, :], dataset[train_size - look_back:len(dataset), :] <NEW_LINE> print('train_dataset: {}, test_dataset: {}'.format(len(train), len(test))) <NEW_LINE> return train, test
Splits dataset into training and test datasets. The last `look_back` rows in train dataset will be used as `look_back` for the test dataset. :param dataset: source dataset :param train_size: specifies the train data size :param look_back: number of previous time steps as int :return: tuple of training data and test dataset
625941b99c8ee82313fbb5fb
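A minimal usage sketch for the split_dataset entry above (illustrative only; the array contents and sizes are assumptions, not part of the dataset row):

import numpy

# ten rows, one feature column, so the look_back overlap is easy to see
data = numpy.arange(10, dtype=float).reshape(10, 1)
train, test = split_dataset(data, train_size=7, look_back=2)
# train covers rows 0..6; test starts at row 5, so its first two rows
# repeat the last two rows of train as the look_back window
assert train.shape == (7, 1)
assert test.shape == (5, 1)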
def Pixel_Ang2Pix(*args): <NEW_LINE> <INDENT> return _stomp.Pixel_Ang2Pix(*args)
Pixel_Ang2Pix(uint32_t const resolution, AngularCoordinate ang, uint32_t & pixnum)
625941b90a366e3fb873e69e
def longest_consec(strarr, num): <NEW_LINE> <INDENT> if not strarr or num < 1 or num > len(strarr): <NEW_LINE> <INDENT> return '' <NEW_LINE> <DEDENT> longest = 0 <NEW_LINE> longest_i = None <NEW_LINE> for i in range(len(strarr)): <NEW_LINE> <INDENT> length = sum(len(item) for item in strarr[i:i + num]) <NEW_LINE> if length > longest: <NEW_LINE> <INDENT> longest = length <NEW_LINE> longest_i = i <NEW_LINE> <DEDENT> <DEDENT> return ''.join(strarr[longest_i:longest_i + num])
Find longest consecutive string Args: strarr (list): List of strings num (int): Number of consecutive strings to find Returns: str Examples: >>> longest_consec(['a', 'bc', 'def'], 2) 'bcdef'
625941b93346ee7daa2b2bf0
def flush(name): <NEW_LINE> <INDENT> if Globals.flushed_paths: <NEW_LINE> <INDENT> return { "name": name, "changes": {}, "result": False, "comment": "Cannot flush twice." } <NEW_LINE> <DEDENT> filename = __salt__["pillar.get"]("system:path-profile", DEFAULT_PROFILE_PATH) <NEW_LINE> rendered_paths = '\n'.join([ 'PATH=$PATH:' + path for path in Globals.paths_to_include ]) <NEW_LINE> contents = PROFILE_TEMPLATE.format(PATHS=rendered_paths) <NEW_LINE> comment = "" <NEW_LINE> result = False <NEW_LINE> changes = { "new": contents, "old": None } <NEW_LINE> if os.path.exists(filename): <NEW_LINE> <INDENT> with open(filename) as f: <NEW_LINE> <INDENT> changes["old"] = f.read() <NEW_LINE> <DEDENT> <DEDENT> if changes["old"] == changes["new"]: <NEW_LINE> <INDENT> changes = {} <NEW_LINE> comment = "Nothing to change." <NEW_LINE> result = True <NEW_LINE> <DEDENT> elif __opts__["test"]: <NEW_LINE> <INDENT> result = None <NEW_LINE> comment = "Profile file not written." <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> with open(filename, "w") as f: <NEW_LINE> <INDENT> f.write(contents) <NEW_LINE> comment = "Updated profile file." <NEW_LINE> result = True <NEW_LINE> <DEDENT> <DEDENT> Globals.flushed_paths = True <NEW_LINE> return { "name": name, "result": result, "comment": comment, "changes": changes }
Flushes the changes to the PATH variable that were cumulated.
625941b9a934411ee3751521
def test_SubmissionStatus_json(): <NEW_LINE> <INDENT> status = SubmissionStatus( id="foo", etag="bar", submissionAnnotations={"foo": "baz"} ) <NEW_LINE> returned_json_str = status.json() <NEW_LINE> expected_status = { "etag": "bar", "id": "foo", "submissionAnnotations": { "annotations": { "foo": { "type": "STRING", "value": ["baz"] } }, "etag": "bar", "id": "foo" } } <NEW_LINE> expected_str = json.dumps(expected_status, sort_keys=True, indent=2, ensure_ascii=True) <NEW_LINE> assert returned_json_str == expected_str
Test that the overloaded json method changes annotations to Synapse-style annotations
625941b91f037a2d8b946086
def model_fn_extra(estimator): <NEW_LINE> <INDENT> def _model_fn(features, labels, mode): <NEW_LINE> <INDENT> estimatorSpec = estimator._call_model_fn( features=features, labels=labels, mode=mode, config=estimator.config) <NEW_LINE> if estimatorSpec.export_outputs: <NEW_LINE> <INDENT> estimatorSpec.export_outputs['predict'] = tf.estimator.export.PredictOutput(estimatorSpec.predictions) <NEW_LINE> estimatorSpec.export_outputs['serving_default'] = tf.estimator.export.PredictOutput(estimatorSpec.predictions) <NEW_LINE> <DEDENT> tf.logging.info('\nestimatorSpec prediction keys: {}\n'.format(estimatorSpec.predictions.keys())) <NEW_LINE> return estimatorSpec <NEW_LINE> <DEDENT> return _model_fn
A function that takes a specified estimator and returns a function that in turn returns an EstimatorSpec. When the estimatorSpec's `export_outputs` is defined, it updates it to a PredictOutput created from the existing `predictions` dict. Intended to be passed as an arg to the `model_fn` arg in a `tf.estimator.Estimator(model_fn=...)` call.
625941b98da39b475bd64dfe
def parse_date_exif(date_string): <NEW_LINE> <INDENT> elements = str(date_string).strip().split() <NEW_LINE> if len(elements) < 1: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> date_entries = elements[0].split(':') <NEW_LINE> if len(date_entries) == 3 and date_entries[0] > '0000': <NEW_LINE> <INDENT> year = int(date_entries[0]) <NEW_LINE> month = int(date_entries[1]) <NEW_LINE> day = int(date_entries[2].split('.')[0]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> time_zone_adjust = False <NEW_LINE> hour = 12 <NEW_LINE> minute = 0 <NEW_LINE> second = 0 <NEW_LINE> if len(elements) > 1: <NEW_LINE> <INDENT> time_entries = re.split('(\+|-|Z)', elements[1]) <NEW_LINE> time = time_entries[0].split(':') <NEW_LINE> if len(time) == 3: <NEW_LINE> <INDENT> hour = int(time[0]) <NEW_LINE> minute = int(time[1]) <NEW_LINE> second = int(time[2].split('.')[0]) <NEW_LINE> <DEDENT> elif len(time) == 2: <NEW_LINE> <INDENT> hour = int(time[0]) <NEW_LINE> minute = int(time[1]) <NEW_LINE> <DEDENT> if len(time_entries) > 2: <NEW_LINE> <INDENT> time_zone = time_entries[2].split(':') <NEW_LINE> if len(time_zone) == 2: <NEW_LINE> <INDENT> time_zone_hour = int(time_zone[0]) <NEW_LINE> time_zone_min = int(time_zone[1]) <NEW_LINE> if time_entries[1] == '-': <NEW_LINE> <INDENT> time_zone_hour *= -1 <NEW_LINE> <DEDENT> dateadd = timedelta(hours=time_zone_hour, minutes=time_zone_min) <NEW_LINE> time_zone_adjust = True <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> try: <NEW_LINE> <INDENT> date = datetime(year, month, day, hour, minute, second) <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> date.strftime('%Y/%m-%b') <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> if time_zone_adjust: <NEW_LINE> <INDENT> date += dateadd <NEW_LINE> <DEDENT> return date
extract date info from EXIF data YYYY:MM:DD HH:MM:SS or YYYY:MM:DD HH:MM:SS+HH:MM or YYYY:MM:DD HH:MM:SS-HH:MM or YYYY:MM:DD HH:MM:SSZ
625941b907d97122c4178712
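A usage sketch for the parse_date_exif entry above, reflecting the code's behaviour as written (the function expects re, datetime and timedelta at module scope; the sample timestamps are assumptions):

from datetime import datetime

# timezone-qualified EXIF timestamp: the +05:30 offset is added to the naive time
assert parse_date_exif('2016:12:22 13:01:00+05:30') == datetime(2016, 12, 22, 18, 31, 0)
# date-only values fall back to 12:00:00
assert parse_date_exif('2016:12:22') == datetime(2016, 12, 22, 12, 0, 0)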
def get_last_build_for_dependency(dependency, previous_build=None): <NEW_LINE> <INDENT> if dependency.auto_track: <NEW_LINE> <INDENT> if previous_build: <NEW_LINE> <INDENT> return previous_build.build_dependencies.filter( projectdependency=dependency).first() <NEW_LINE> <DEDENT> <DEDENT> return dependency.current_build
Return the last known build for the provided ProjectDependency dependency, which is defined as the current build associated with itself if it's not auto-tracked, or the most recent build for auto-tracked cases.
625941b9adb09d7d5db6c619
def test_attach_book(self): <NEW_LINE> <INDENT> response = self.client.get('/attach_book/') <NEW_LINE> self.failUnlessEqual(response.status_code, 405)
Ensure the attach book page doesn't allow GET requests
625941b9099cdd3c635f0ae3
def test_portals_id_template_folders_fk_get(self): <NEW_LINE> <INDENT> pass
Test case for portals_id_template_folders_fk_get Find a related item by id for templateFolders.
625941b97047854f462a1294
def __str__(self): <NEW_LINE> <INDENT> return json.dumps(self._to_dict(), indent=2)
Return a `str` version of this DocumentSentimentResults object.
625941b9eab8aa0e5d26d9e5
def get_dict_of_struct(struct, connection=None, fetch_nested=False, attributes=None): <NEW_LINE> <INDENT> def remove_underscore(val): <NEW_LINE> <INDENT> if val.startswith('_'): <NEW_LINE> <INDENT> val = val[1:] <NEW_LINE> remove_underscore(val) <NEW_LINE> <DEDENT> return val <NEW_LINE> <DEDENT> res = {} <NEW_LINE> if struct is not None: <NEW_LINE> <INDENT> for key, value in struct.__dict__.items(): <NEW_LINE> <INDENT> nested = False <NEW_LINE> key = remove_underscore(key) <NEW_LINE> if value is None: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> elif isinstance(value, sdk.Struct): <NEW_LINE> <INDENT> res[key] = get_dict_of_struct(value) <NEW_LINE> <DEDENT> elif isinstance(value, Enum) or isinstance(value, datetime): <NEW_LINE> <INDENT> res[key] = str(value) <NEW_LINE> <DEDENT> elif isinstance(value, list) or isinstance(value, sdk.List): <NEW_LINE> <INDENT> if isinstance(value, sdk.List) and fetch_nested and value.href: <NEW_LINE> <INDENT> value = connection.follow_link(value) <NEW_LINE> nested = True <NEW_LINE> <DEDENT> res[key] = [] <NEW_LINE> for i in value: <NEW_LINE> <INDENT> if isinstance(i, sdk.Struct): <NEW_LINE> <INDENT> if not nested: <NEW_LINE> <INDENT> res[key].append(get_dict_of_struct(i)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> nested_obj = dict( (attr, getattr(i, attr)) for attr in attributes if getattr(i, attr, None) ) <NEW_LINE> nested_obj['id'] = getattr(i, 'id', None), <NEW_LINE> res[key].append(nested_obj) <NEW_LINE> <DEDENT> <DEDENT> elif isinstance(i, Enum): <NEW_LINE> <INDENT> res[key].append(str(i)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> res[key].append(i) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> res[key] = value <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return res
Convert SDK Struct type into dictionary.
625941b9b57a9660fec33707
def to_json(self): <NEW_LINE> <INDENT> new_dict = copy.copy(self.__dict__) <NEW_LINE> new_dict['__class__'] = self.__class__.__name__ <NEW_LINE> new_dict['created_at'] = str(self.created_at) <NEW_LINE> new_dict['updated_at'] = str(self.updated_at) <NEW_LINE> return new_dict
Creates and returns a dictionary which is json serializable
625941b921a7993f00bc7b71
def validate_report_name_unique(form, field): <NEW_LINE> <INDENT> report_slug = slugify(form.report_name.data) <NEW_LINE> try: <NEW_LINE> <INDENT> reports = query_item('trait', filters=[('report_slug', '=', report_slug)]) <NEW_LINE> if len(reports) > 0: <NEW_LINE> <INDENT> raise ValidationError(f"That report name is not available. Choose a unique report name") <NEW_LINE> <DEDENT> <DEDENT> except BadRequest: <NEW_LINE> <INDENT> raise ValidationError(f"Backend Error")
Checks to ensure that the report name submitted is unique.
625941b9cb5e8a47e48b7936
def find_match(hash): <NEW_LINE> <INDENT> print('Finding match for {}'.format(hash)) <NEW_LINE> matches = get_matching_list(hash[:5]) <NEW_LINE> compare_str = hash[5:].upper() <NEW_LINE> for line in matches.split('\r\n'): <NEW_LINE> <INDENT> (potential_match, count) = line.split(':') <NEW_LINE> if potential_match == compare_str: <NEW_LINE> <INDENT> return (potential_match, count) <NEW_LINE> <DEDENT> <DEDENT> return ('', 0)
Looks for a password hash from haveibeenpwned.com that matches the provided hash. :param: hash: SHA1 hash :return: Tuple containing the matching hash and number of times it has been compromised.
625941b9ec188e330fd5a62d
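The find_match entry above relies on a get_matching_list helper (defined elsewhere) that queries the haveibeenpwned.com range API with the first five hex characters. A hedged sketch of how it would be called, assuming that helper is available in the module:

import hashlib

# SHA-1 hex digest of the candidate password; find_match keeps the first five
# characters for the range query and compares the remaining 35, uppercased
digest = hashlib.sha1('password123'.encode('utf-8')).hexdigest()
match, count = find_match(digest)
if match:
    print('compromised {} times'.format(count))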
def go_previous_popup(self): <NEW_LINE> <INDENT> if self.navstack: <NEW_LINE> <INDENT> self.queue.insert(1, self.navstack.pop()) <NEW_LINE> <DEDENT> return True
Go to previous menu in navstack.
625941b93d592f4c4ed1cf06
def checkTemplateImplementationVersion(self, edgvVersion = None): <NEW_LINE> <INDENT> if not edgvVersion: <NEW_LINE> <INDENT> edgvVersion = self.getDatabaseVersion() <NEW_LINE> <DEDENT> templateName = self.getTemplateName(edgvVersion) <NEW_LINE> fileImplementationVersion = self.getImplementationVersionFromFile(edgvVersion) <NEW_LINE> templateImplementationVersion = self.getImplementationVersion() <NEW_LINE> if templateImplementationVersion < fileImplementationVersion: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return False
Returns True if the templateSql version is larger than the installed template version. Works when abstractDb is connected to the template.
625941b9627d3e7fe0d68cd6
def is_part_of(self, element): <NEW_LINE> <INDENT> return element.get_element_index() in self._part_of_adjacency
Returns true if the current RelationshipElement is part of the given element :param element: a RelationshipElement (not an index) :return: true if the current RelationshipElement is part of the given element
625941b9e1aae11d1e749b3b
def pc_input_buffers_full_avg(self, *args): <NEW_LINE> <INDENT> return _PHY_swig.wave_to_float_single_cpp_sptr_pc_input_buffers_full_avg(self, *args)
pc_input_buffers_full_avg(wave_to_float_single_cpp_sptr self, int which) -> float pc_input_buffers_full_avg(wave_to_float_single_cpp_sptr self) -> pmt_vector_float
625941b923e79379d52ee3ef
def __init__(self,datarate,quantasize,chunkrate): <NEW_LINE> <INDENT> super(RateChunker,self).__init__() <NEW_LINE> self.datarate = datarate <NEW_LINE> self.quanta = quantasize <NEW_LINE> self.chunkrate = chunkrate <NEW_LINE> self.remainder = "" <NEW_LINE> self.shutdownMsg = None <NEW_LINE> self.canStop = False <NEW_LINE> self.mustStop = False
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
625941b971ff763f4b549516
def validate_makeup(xs): <NEW_LINE> <INDENT> if isinstance(xs, float) and np.isnan(xs): <NEW_LINE> <INDENT> return '*Missing*' <NEW_LINE> <DEDENT> word_num = len(xs.split()) <NEW_LINE> return 'Complete' if word_num >= 100 else '*Incomplete*'
The makeup assignment submission should be between 200 and 400 words.
625941b9a17c0f6771cbdedb
def test_replace_tokens_multiple_no_tokens(self): <NEW_LINE> <INDENT> token_vals = { } <NEW_LINE> test = "Where is my {one} {two}?!?" <NEW_LINE> expected = "Where is my {one} {two}?!?" <NEW_LINE> received = lib.replace_tokens(test, token_vals) <NEW_LINE> self.assertEqual(expected, received)
supply replace tokens with a string that has multiple distinct tokens that are not in the supplied token dictionary
625941b9d18da76e23532359
def test_assignments_service(parser): <NEW_LINE> <INDENT> tree = parser.parse('a = alpine echo') <NEW_LINE> result = Compiler.compile(tree) <NEW_LINE> assert result['tree']['1']['method'] == 'execute' <NEW_LINE> assert result['tree']['1']['name'] == ['a']
Ensures that service assignments are compiled correctly
625941b98e7ae83300e4ae53
def cast_single_ray(self, ray_angle, level=None, origin=None, target=None, depth=None): <NEW_LINE> <INDENT> origin = origin if origin else self.player.position <NEW_LINE> level = level if level else self.level <NEW_LINE> depth = depth if depth else self.depth <NEW_LINE> target = target if target else self.level.wall_chars <NEW_LINE> ray = raycast.Vector(origin, ray_angle, 0.0) <NEW_LINE> while ray.length < depth: <NEW_LINE> <INDENT> ray.length += 1 <NEW_LINE> test_point = ray.end_point <NEW_LINE> if not level.point_is_present(test_point): <NEW_LINE> <INDENT> ray.length = depth <NEW_LINE> <DEDENT> elif level.check_cell(test_point, target): <NEW_LINE> <INDENT> ray.length -= 1 <NEW_LINE> self._precise_ray(ray, 0.1, target, depth=depth) <NEW_LINE> ray.length -= 0.1 <NEW_LINE> self._precise_ray(ray, 0.01, target, depth=depth) <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> return ray
The method returns a vector pointing in the direction of ray_angle; the vector's length will equal the distance from origin to target if a collision with the target block was found, or depth if there was no collision. By default the method looks for a collision with a wall within the rendering depth.
625941b92eb69b55b151c732
def _optimize(func, optimize=True): <NEW_LINE> <INDENT> func = func.with_attr("Compiler", "ethos-u") <NEW_LINE> mod = tvm.IRModule.from_expr(func) <NEW_LINE> mod = relay.transform.InferType()(mod) <NEW_LINE> if optimize: <NEW_LINE> <INDENT> mod = LayoutOptimizer()(mod) <NEW_LINE> <DEDENT> entry = mod["main"] <NEW_LINE> return entry if isinstance(func, relay.Function) else entry.body
Create IRModule and run layout optimizer pass.
625941b9004d5f362079a1be
def ngram_filter(myvocab, threshold=90, verbose=False): <NEW_LINE> <INDENT> lengths = np.array([len(l) for l in myvocab]) <NEW_LINE> minlengthreshold = np.percentile(lengths, 1) <NEW_LINE> for i in (70, 65, 60, 55, 50, 45, 40): <NEW_LINE> <INDENT> maxlengthreshold = np.percentile(lengths, i) <NEW_LINE> mytokens = [t for t in myvocab if minlengthreshold <= len(t) <= maxlengthreshold] <NEW_LINE> if len(mytokens) <= MAX_NGRAM_VOC: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> <DEDENT> if len(mytokens) > MAX_NGRAM_VOC: <NEW_LINE> <INDENT> print('Vocabulary size too large, skipping n-gram filtering') <NEW_LINE> return myvocab <NEW_LINE> <DEDENT> old_len = len(myvocab) <NEW_LINE> max_exp = 21 <NEW_LINE> vectorizer = CountVectorizer(analyzer='char', max_features=2 ** max_exp, ngram_range=(1,4), strip_accents=None, lowercase=True, max_df=1.0) <NEW_LINE> firstset = set(compute_deletions(mytokens, vectorizer, threshold)) <NEW_LINE> vectorizer = TfidfVectorizer(analyzer='char', max_features=2 ** max_exp, ngram_range=(1,4), strip_accents=None, lowercase=True, max_df=1.0, sublinear_tf=True, binary=True) <NEW_LINE> secondset = set(compute_deletions(mytokens, vectorizer, threshold)) <NEW_LINE> for token in firstset.intersection(secondset): <NEW_LINE> <INDENT> del myvocab[token] <NEW_LINE> <DEDENT> if verbose is True: <NEW_LINE> <INDENT> print(sorted(firstset.intersection(secondset))) <NEW_LINE> <DEDENT> print_changes('ngrams', old_len, len(myvocab)) <NEW_LINE> return myvocab
Find dissimilar tokens based on character n-gram occurrences.
625941b9d6c5a10208143ecf
def click_beiandengji(self): <NEW_LINE> <INDENT> self.click(self.beiandengji_loc)
Click Vehicle Filing Registration - Filing Registration
625941b9f548e778e58cd403
def _copy_attribute_manual(self, inst, obj, spec): <NEW_LINE> <INDENT> raise NotImplementedError()
Hook that is called in :py:meth:`copy` to invoke the manual copying of an object *obj* **after** the copied instance *inst* was created. *spec* is the associated :py:class:`CopySpec` object. Instead of returning the copied object, the method should directly alter *inst*.
625941b9ab23a570cc250007
def load(self): <NEW_LINE> <INDENT> if self.basis_atom_map is not None: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> if self.filename.endswith('.nwchem'): <NEW_LINE> <INDENT> self.basis_atom_map = load_basis_atom_map_nwchem(self.filename) <NEW_LINE> <DEDENT> elif self.filename.endswith('.gbs'): <NEW_LINE> <INDENT> self.basis_atom_map = load_basis_atom_map_gbs(self.filename) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise IOError(f'File format not supported: {self.filename}') <NEW_LINE> <DEDENT> self._to_arrays() <NEW_LINE> self._to_segmented() <NEW_LINE> self._normalize_contractions()
Load the basis set from file if it hasn't been done already. If the basis_atom_map is already defined (not None), then the load method is ignored.
625941b976d4e153a657e9b8
def pause(self): <NEW_LINE> <INDENT> pass
void Phonon.MediaObject.pause()
625941b9090684286d50eb68
def get_error(self): <NEW_LINE> <INDENT> return self.queue.rpop(self.conf.queue_key)
Get the next error to be categorised
625941b95e10d32532c5edb6
def row_value_by_casename(fp, sheet_name, case_name): <NEW_LINE> <INDENT> test_data = xlrd.open_workbook(fp, 'br') <NEW_LINE> sheet_name = test_data.sheet_by_name('%s' % sheet_name) <NEW_LINE> cases = sheet_name.col_values(0) <NEW_LINE> for case_i in range(len(cases)): <NEW_LINE> <INDENT> if cases[case_i] == case_name: <NEW_LINE> <INDENT> row_value = sheet_name.row_values(case_i) <NEW_LINE> for i in range(0, row_value.__len__()): <NEW_LINE> <INDENT> if type(row_value[i]) == float: <NEW_LINE> <INDENT> row_value[i] = int(row_value[i]) <NEW_LINE> <DEDENT> <DEDENT> return row_value
Get the values of the entire row by case name
625941b9c4546d3d9de728b8
def get_domain(fqdn): <NEW_LINE> <INDENT> if not tld_names: <NEW_LINE> <INDENT> __init() <NEW_LINE> <DEDENT> domain_parsed = urlparse(fqdn) <NEW_LINE> if domain_parsed.netloc: <NEW_LINE> <INDENT> domain = domain_parsed.netloc.split(":", 1)[0] <NEW_LINE> <DEDENT> elif domain_parsed.path: <NEW_LINE> <INDENT> domain = domain_parsed.path.split("@", 1)[0] <NEW_LINE> domain = domain.split("/", 1)[0] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise BadUrl(url=fqdn) <NEW_LINE> <DEDENT> if not domain: <NEW_LINE> <INDENT> raise BadUrl(url=fqdn) <NEW_LINE> <DEDENT> parts = domain.split(".") <NEW_LINE> for i in range(len(parts)): <NEW_LINE> <INDENT> d = ".".join(parts[i:]) <NEW_LINE> if d in tld_names: <NEW_LINE> <INDENT> return ".".join(parts[i-1:]) if i > 0 else ".".join(parts) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> raise DomainNotFound(fqdn)
Extracts the domain and top level domain from a given fqdn. input: fqdn - fully qualified domain name to extract the domain from. output: domain - domain and top level domain.
625941b93eb6a72ae02ec35f
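The get_domain entry above depends on a module-level tld_names collection (loaded by __init) and on BadUrl/DomainNotFound exceptions, none of which appear in the row. An expected-behaviour sketch under the assumption that 'co.uk' and 'com' are present in tld_names:

# netloc form: the port is stripped, then the registrable domain is returned
get_domain('https://sub.example.co.uk:8080/path')   # -> 'example.co.uk'
# bare host form is handled through the path branch
get_domain('www.example.com/index.html')            # -> 'example.com'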
def copy_any(source_path, target_path): <NEW_LINE> <INDENT> from shutil import copy2, copytree <NEW_LINE> if os.path.isdir(source_path): <NEW_LINE> <INDENT> copytree(source_path, target_path) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> copy2(source_path, target_path)
Copy a file or directory
625941b9796e427e537b044a
def is_valid_fw(filename): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> name, ext = filename.split('.') <NEW_LINE> assert ext in file_update_allowed_ext <NEW_LINE> <DEDENT> except (ValueError, AssertionError): <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> if '_m' in name: <NEW_LINE> <INDENT> with open(filename, 'r') as bin: <NEW_LINE> <INDENT> fbody = bin.read() <NEW_LINE> <DEDENT> m = fw_md5(fbody) <NEW_LINE> return True if name.endswith('_m%s' % m) else False <NEW_LINE> <DEDENT> return False
Verify that the md5 value in the file name matches the file
625941b982261d6c526ab32b
def load_roles(self, filename): <NEW_LINE> <INDENT> r = {} <NEW_LINE> if os.path.isfile(os.path.join("/etc/mplane/", filename)): <NEW_LINE> <INDENT> filepath = os.path.join("/etc/mplane/", filename) <NEW_LINE> <DEDENT> elif os.path.isfile(os.path.join(os.environ['HOME'], filename)): <NEW_LINE> <INDENT> filepath = os.path.join(os.environ['HOME'], filename) <NEW_LINE> <DEDENT> elif ((os.getenv('MPLANE_CONF_DIR', default=None) is not None) and (os.path.isfile(os.path.join(os.getenv('MPLANE_CONF_DIR', default=None), filename)))): <NEW_LINE> <INDENT> filepath = os.path.join(os.getenv('MPLANE_CONF_DIR', default=None), filename) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise OSError("File " + filename + " not found. Retry setting $MPLANE_CONF_DIR") <NEW_LINE> <DEDENT> with open(filepath) as f: <NEW_LINE> <INDENT> for line in f.readlines(): <NEW_LINE> <INDENT> line = line.rstrip('\n') <NEW_LINE> if line[0] != '#': <NEW_LINE> <INDENT> user = line.split(': ')[0] <NEW_LINE> roles = set(line.split(': ')[1].split(', ')) <NEW_LINE> r[user] = roles <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return r
Loads user-role-capability associations and keeps them in cache
625941b9f7d966606f6a9e90
def input_amount(): <NEW_LINE> <INDENT> global amount_options <NEW_LINE> os.system('cls') <NEW_LINE> print('What is the donation amount?') <NEW_LINE> for num,option in enumerate(amount_options): <NEW_LINE> <INDENT> print('{} - '.format(num)+option[0]) <NEW_LINE> <DEDENT> print('9 - Cancel') <NEW_LINE> try: <NEW_LINE> <INDENT> choice = int(input()) <NEW_LINE> if not choice in [0,1,2,3,9]: <NEW_LINE> <INDENT> choice = input_amount() <NEW_LINE> <DEDENT> <DEDENT> except: <NEW_LINE> <INDENT> choice = input_amount() <NEW_LINE> <DEDENT> return choice
description: print out the message and input the amount choice; 9 is Cancel. param {type} return: choice {int}
625941b991af0d3eaac9b89c
def summarize_data(self, summary_length_bytes=8): <NEW_LINE> <INDENT> if not self.data: <NEW_LINE> <INDENT> return "" <NEW_LINE> <DEDENT> summary_hex = binascii.hexlify(self.data[0:summary_length_bytes]).decode('utf-8') <NEW_LINE> raw_hex = ' '.join(summary_hex[i:i + 2] for i in range(0, len(summary_hex), 2)) <NEW_LINE> continuation = '...' if len(self.data) > summary_length_bytes else '' <NEW_LINE> return "{}{}".format(raw_hex, continuation)
Returns a quick summary of the given packet's data.
625941b9be7bc26dc91cd48d
def findBalanced(text, openDelim=['[['], closeDelim=[']]']): <NEW_LINE> <INDENT> openPat = '|'.join([re.escape(x) for x in openDelim]) <NEW_LINE> afterPat = {o: re.compile(openPat + '|' + c, re.DOTALL) for o, c in izip(openDelim, closeDelim)} <NEW_LINE> stack = [] <NEW_LINE> start = 0 <NEW_LINE> cur = 0 <NEW_LINE> startSet = False <NEW_LINE> startPat = re.compile(openPat) <NEW_LINE> nextPat = startPat <NEW_LINE> while True: <NEW_LINE> <INDENT> next = nextPat.search(text, cur) <NEW_LINE> if not next: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> if not startSet: <NEW_LINE> <INDENT> start = next.start() <NEW_LINE> startSet = True <NEW_LINE> <DEDENT> delim = next.group(0) <NEW_LINE> if delim in openDelim: <NEW_LINE> <INDENT> stack.append(delim) <NEW_LINE> nextPat = afterPat[delim] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> opening = stack.pop() <NEW_LINE> if stack: <NEW_LINE> <INDENT> nextPat = afterPat[stack[-1]] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> yield start, next.end() <NEW_LINE> nextPat = startPat <NEW_LINE> start = next.end() <NEW_LINE> startSet = False <NEW_LINE> <DEDENT> <DEDENT> cur = next.end()
Assuming that text contains a properly balanced expression using :param openDelim: as opening delimiters and :param closeDelim: as closing delimiters. :return: an iterator producing pairs (start, end) of start and end positions in text containing a balanced expression.
625941b9097d151d1a222ce3
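The findBalanced entry above is Python 2 code (it uses izip) and yields (start, end) spans of top-level balanced delimiters. A small sketch of the spans it would produce on wiki-link text, assuming re and izip are imported at module level:

text = '[[a [[b]] c]] and [[d]]'
spans = list(findBalanced(text))
# two top-level balanced spans; the nested [[b]] stays inside the first one
assert spans == [(0, 13), (18, 23)]
assert [text[s:e] for s, e in spans] == ['[[a [[b]] c]]', '[[d]]']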
def swift(self): <NEW_LINE> <INDENT> return Swift(swift_client.Connection( authurl=self.user["auth_url"], user=self.user["username"], key=self.user["password"], tenant_name=self.user["project_name"], auth_version=self.os_identity_api_version))
Create swift client.
625941b985dfad0860c3ace0
def _test_list_view_project(self): <NEW_LINE> <INDENT> view = ProjectListView.as_view() <NEW_LINE> request = self.request_factory.get( reverse('project_list') ) <NEW_LINE> request.user = self.user <NEW_LINE> response = view(request) <NEW_LINE> self.assertEqual(response.status_code, 200) <NEW_LINE> self.assertEqual(response.context_data['object_list'].count(), 4) <NEW_LINE> self.insert_project() <NEW_LINE> response = view(request) <NEW_LINE> self.assertEqual(response.context_data['object_list'].count(), 5)
Tests data: List
625941b956b00c62f0f144e6
def generate_parition_list(self, df: pd.DataFrame, partition_cols: list): <NEW_LINE> <INDENT> df_parts = df.drop_duplicates(subset=partition_cols) <NEW_LINE> table_info = self._get_table_info() <NEW_LINE> print(table_info) <NEW_LINE> partition_list = [] <NEW_LINE> for index, row in df_parts.iterrows(): <NEW_LINE> <INDENT> part_loc = [f"{col}={str(row[col])}" for col in partition_cols] <NEW_LINE> part_dict = { "Values": [str(row[col]) for col in partition_cols], "StorageDescriptor": { "Location": f"{table_info['table_location']}/{'/'.join(part_loc)}/", "InputFormat": table_info['input_format'], "OutputFormat": table_info['output_format'], "SerdeInfo": table_info['serde_info'], "Columns": table_info['columns'], }, } <NEW_LINE> partition_list.append(part_dict.copy()) <NEW_LINE> <DEDENT> return partition_list
Generate the list of partitions need to be added to the data catalog table. Args: df (pd.DataFrame): Pandas dataframe partition_cols (list): Partition columns passed as a list
625941b966673b3332b91f1f
def _parse_stop_type(self, string): <NEW_LINE> <INDENT> convert = {"Transit": "T", "Departure": "P", "Arrival": "A", "Stop": "F"} <NEW_LINE> return convert[string]
Parse the stop type and convert it
625941b9566aa707497f4401
def create_topic(self): <NEW_LINE> <INDENT> logger.info("In topic creation kafka integration.") <NEW_LINE> client = AdminClient(self.broker_properties) <NEW_LINE> """Checks if the given topic exists""" <NEW_LINE> topic_metadata = client.list_topics(timeout=5) <NEW_LINE> topic_present = self.topic_name in set(t.topic for t in iter(topic_metadata.topics.values())) <NEW_LINE> if (not topic_present): <NEW_LINE> <INDENT> futures = client.create_topics( [NewTopic(topic=self.topic_name, num_partitions=self.num_partitions, replication_factor=self.num_replicas)] ) <NEW_LINE> for _, future in futures.items(): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> future.result() <NEW_LINE> logger.info(f"Created topic {self.topic_name}.") <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> print( f"Failed to create topic: {self.topic_name}. Exception: {e}" ) <NEW_LINE> pass <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> logger.info(f"Topic {self.topic_name} present doing nothing.")
Creates the producer topic if it does not already exist
625941b9be383301e01b5314
def is_set_max_noutput_items(self): <NEW_LINE> <INDENT> return _ra_blocks_swig.synch_clock_sptr_is_set_max_noutput_items(self)
is_set_max_noutput_items(synch_clock_sptr self) -> bool
625941b9a8370b7717052729
def read(self, reader): <NEW_LINE> <INDENT> self.docker_image = reader.get('docker', 'docker_image') <NEW_LINE> self.unix_socket = reader.get( 'docker', 'unix_socket', False, bool) <NEW_LINE> self.external_url = reader.get('docker', 'external_url') <NEW_LINE> self.external_registry_1 = reader.get('docker', 'external_registry_1') <NEW_LINE> self.external_registry_2 = reader.get('docker', 'external_registry_2') <NEW_LINE> self.private_registry_url = reader.get( 'docker', 'private_registry_url') <NEW_LINE> self.private_registry_name = reader.get( 'docker', 'private_registry_name') <NEW_LINE> self.private_registry_username = reader.get( 'docker', 'private_registry_username') <NEW_LINE> self.private_registry_password = reader.get( 'docker', 'private_registry_password')
Read docker settings.
625941b98a43f66fc4b53ef1
def getForwarders(forwarders=None, listenAddress=LISTEN_ADDRESS): <NEW_LINE> <INDENT> if forwarders is None: <NEW_LINE> <INDENT> forwarders = [] <NEW_LINE> with open("/etc/resolv.conf", "r") as resolvconf: <NEW_LINE> <INDENT> for line in resolvconf: <NEW_LINE> <INDENT> if line.startswith("nameserver"): <NEW_LINE> <INDENT> if line[11:-1] == listenAddress: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> forwarders.append((line[11:-1], DNS_PORT)) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> if len(forwarders) == 0: <NEW_LINE> <INDENT> forwarders = None <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> forwarders = forwarders.split(",") <NEW_LINE> forwarders = [(address, DNS_PORT) for address in forwarders] <NEW_LINE> <DEDENT> return forwarders
Reads forwarders from arguments or from resolv.conf and create a list of tuples containing the forwarders' IP and the port.
625941b98a349b6b435e7ffc
def __init__(self, name, stop_loading=lambda x: False): <NEW_LINE> <INDENT> self.filename = name.translate(utilities.safe_ascii) <NEW_LINE> self.seed = static_hash(configuration.session.name + name) <NEW_LINE> self.name = name.title() <NEW_LINE> self.answers = {} <NEW_LINE> self.file = os.path.join(log_directory(), self.filename) <NEW_LINE> self.last_time = 0 <NEW_LINE> self.previous_answer = self.answer('None') <NEW_LINE> self.last_command = "" <NEW_LINE> self.last_asked_question = "" <NEW_LINE> self.answerable_any = False <NEW_LINE> self.logs = [] <NEW_LINE> self.informations = {} <NEW_LINE> try: <NEW_LINE> <INDENT> os.mkdir(self.file) <NEW_LINE> <DEDENT> except OSError: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> self.answerables_cache = None <NEW_LINE> self.read_log(stop_loading) <NEW_LINE> self.lock = threading.Lock()
Initialise student data or read the log file
625941b91b99ca400220a939
def retrieve_phone_numbers(self, user_id): <NEW_LINE> <INDENT> endpoint = UserEndpoint.RETRIEVE_PHONE.value.format(id=user_id) <NEW_LINE> return self._post(url=self._build_url(endpoint))
POST /backoffice/v3/user/{id}/phone/retrieve :param str user_id: path parameter :return: Response object :rtype: requests.Response
625941b99b70327d1c4e0c5c
def __init__(self, filename: str = '', projectfile: Optional[HidraProjectFile] = None, peak_tag: str = '', hidraworkspace: Optional[HidraWorkspace] = None, peak_collection: Optional[PeakCollection] = None, point_list: Optional[PointList] = None, strain_single: Optional[StrainFieldSingle] = None) -> None: <NEW_LINE> <INDENT> super().__init__() <NEW_LINE> self._strains: List[StrainFieldSingle] = [] <NEW_LINE> self._winners: Optional[Tuple[np.ndarray, np.ndarray]] = None <NEW_LINE> self._point_list: Optional[PointList] = None <NEW_LINE> self._scalar_field: Optional[ScalarFieldSample] = None <NEW_LINE> self._effective_params: Dict[str, ScalarFieldSample] = {} <NEW_LINE> single_scan_kwargs = dict(filename=filename, projectfile=projectfile, peak_tag=peak_tag, hidraworkspace=hidraworkspace, peak_collection=peak_collection, point_list=point_list) <NEW_LINE> if True in [bool(v) for v in single_scan_kwargs.values()]: <NEW_LINE> <INDENT> strain_single = StrainFieldSingle(**single_scan_kwargs) <NEW_LINE> <DEDENT> if isinstance(strain_single, StrainFieldSingle): <NEW_LINE> <INDENT> self._initialize_from_strain_field_single(strain_single)
Converts a HidraWorkspace and PeakCollection into a ScalarField
625941b9d10714528d5ffb67
def glorot_uniform(shape): <NEW_LINE> <INDENT> fan_in, fan_out = get_fans(shape) <NEW_LINE> scale = np.sqrt(6. / (fan_in + fan_out)) <NEW_LINE> shape = (fan_out, fan_in) if len(shape) == 2 else shape <NEW_LINE> bias_shape = (fan_out, 1) if len(shape) == 2 else ( 1, 1, 1, shape[3]) <NEW_LINE> return uniform(shape, scale), uniform(shape=bias_shape)
A function for smart uniform distribution based initialization of parameters [Glorot et al. http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf] :param fan_in: The number of units in previous layer. :param fan_out: The number of units in current layer. :return:[numpy array, numpy array]: A randomly initialized array of shape [fan_out, fan_in] and the bias of shape [fan_out, 1]
625941b9187af65679ca4fa5
def findMin(self, nums): <NEW_LINE> <INDENT> start, end = 0, len(nums) - 1 <NEW_LINE> while start <= end: <NEW_LINE> <INDENT> if nums[start] < nums[end]: <NEW_LINE> <INDENT> return nums[start] <NEW_LINE> <DEDENT> mid = start + (end - start) // 2 <NEW_LINE> n = nums[mid] <NEW_LINE> if n >= nums[start]: <NEW_LINE> <INDENT> start = mid + 1 <NEW_LINE> <DEDENT> elif n < nums[start]: <NEW_LINE> <INDENT> end = mid <NEW_LINE> <DEDENT> <DEDENT> return nums[start - 1]
:type nums: List[int] :rtype: int Time: O(log N) Space: O(1) After rotation, array consists of the first increasing segment, and 2nd increasing segment. First segment has every element bigger than those in the 2nd increasing segment. Binary Search: first check if the array is rotated (first is < last means it's NOT rotated anymore), if not, then just reurn first element. If it's rotated, determine if the minimum is in the 1st increasing segment or 2nd increasing segment.
625941b98e05c05ec3eea1fa
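A worked example for the findMin entry above, assuming a hypothetical Solution class that holds the method:

# trace on [4, 5, 6, 7, 0, 1, 2]:
#   start=0, end=6 -> nums[0]=4 >= nums[6]=2, so mid=3, nums[3]=7 >= 4 -> start=4
#   nums[4]=0 < nums[6]=2 -> return nums[4] = 0
solution = Solution()
assert solution.findMin([4, 5, 6, 7, 0, 1, 2]) == 0
assert solution.findMin([1, 2, 3]) == 1   # not rotated: first element returned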
def concatenation(): <NEW_LINE> <INDENT> choose_sequence() <NEW_LINE> global in_use_seq <NEW_LINE> if in_use_seq == []: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> real_answer = "".join(in_use_seq) <NEW_LINE> print("What would this sequence produce if .join() was used?") <NEW_LINE> user_answer = input() <NEW_LINE> if user_answer == real_answer: <NEW_LINE> <INDENT> print("Correct.") <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> print("Incorrect. The correct answer is:") <NEW_LINE> print(real_answer)
This function takes the desired test sequence and applies .join() to it, and tests to see if the user's input response matches.
625941b9e5267d203edcdb29
def internal_error(e): <NEW_LINE> <INDENT> message = "Error: {}".format(e) <NEW_LINE> return render_template("error.html", message=message), 500
The handler when an internal error happens :param e: The error message :return: templated html
625941b94428ac0f6e5ba67a
def set_arrays_to_one(self, negative=False): <NEW_LINE> <INDENT> for o in self.obj_data: <NEW_LINE> <INDENT> mod = o.mod_data[o.active_mod_index].array_mod <NEW_LINE> if self.axis == Axis.X: <NEW_LINE> <INDENT> if mod.use_relative_offset: <NEW_LINE> <INDENT> mod.relative_offset_displace[0] = 1 if negative == False else -1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> snap_factor = abs(o.mesh_obj_dims[0]) <NEW_LINE> mod.constant_offset_displace[0] = snap_factor if negative == False else -snap_factor <NEW_LINE> <DEDENT> <DEDENT> elif self.axis == Axis.Y: <NEW_LINE> <INDENT> if mod.use_relative_offset: <NEW_LINE> <INDENT> mod.relative_offset_displace[1] = 1 if negative == False else -1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> snap_factor = abs(o.mesh_obj_dims[1]) <NEW_LINE> mod.constant_offset_displace[1] = snap_factor if negative == False else -snap_factor <NEW_LINE> <DEDENT> <DEDENT> elif self.axis == Axis.Z: <NEW_LINE> <INDENT> if mod.use_relative_offset: <NEW_LINE> <INDENT> mod.relative_offset_displace[2] = 1 if negative == False else -1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> snap_factor = abs(o.mesh_obj_dims[2]) <NEW_LINE> mod.constant_offset_displace[2] = snap_factor if negative == False else -snap_factor
Set all arrays to one on the current axis.
625941b932920d7e50b28055
def _testmode_base_value_adj_changed_cb(self, adj, cname): <NEW_LINE> <INDENT> value = adj.get_value() <NEW_LINE> self._brush.set_base_value(cname, value)
User adjusted the setting's base value using the scale (test only)
625941b9dd821e528d63b033
def add_account(self): <NEW_LINE> <INDENT> api, screen_name = twitter.authentication() <NEW_LINE> self.auths[screen_name] = api <NEW_LINE> self.streams.append(twitter.open_userstream(api, self.receive_tweet, screen_name)) <NEW_LINE> twitter.getmyicon(api, screen_name) <NEW_LINE> accbutton = QPushButton(self) <NEW_LINE> accbutton.setWhatsThis(screen_name) <NEW_LINE> accbutton.setCheckable(True) <NEW_LINE> accbutton.toggled.connect(self.choose_account) <NEW_LINE> accbutton.setIcon(PyQt5.QtGui.QIcon('images/'+screen_name+'.jpg')) <NEW_LINE> accbutton.setIconSize(QSize(48, 48)) <NEW_LINE> self.accounts_hbox.insertWidget(self.accounts_hbox.count() - 1, accbutton)
add account and register it to local file
625941b9a17c0f6771cbdedc
def grind_hash_for_colors(hashcode): <NEW_LINE> <INDENT> while (len(hashcode) < MINIMUM_HASH_LEN): <NEW_LINE> <INDENT> chardiff = diff(len(hashcode), MINIMUM_HASH_LEN) <NEW_LINE> if DEBUG: <NEW_LINE> <INDENT> print ("Hashcode: %r with length: %d is too small. Appending difference." % (hashcode, len(hashcode))) <NEW_LINE> <DEDENT> hashcode += hashcode[:chardiff] <NEW_LINE> if DEBUG: <NEW_LINE> <INDENT> print ("Hash is now: %r with length: %d" % (hashcode, len(hashcode))) <NEW_LINE> <DEDENT> <DEDENT> hashparts = split_sequence(hashcode, HEX_COLOR_LEN) <NEW_LINE> colors = [] <NEW_LINE> for i in range(COLOR_QUANTITY): <NEW_LINE> <INDENT> colors.append(hex2rgb(hashparts[i])) <NEW_LINE> <DEDENT> if DEBUG: <NEW_LINE> <INDENT> print ("Generated colors: %r" % colors) <NEW_LINE> <DEDENT> return colors
Extracts information from the hashcode to generate different colors. Returns a list of colors as (r,g,b) tuples.
625941b9377c676e91272032
def add(self, ip=None, url=None, timestamp=None): <NEW_LINE> <INDENT> ip = ip or self.get_ip() <NEW_LINE> url = url or self.get_url() <NEW_LINE> if self._is_excluded(ip=ip, url=url): <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> entry = self._ip_banned.get(ip) <NEW_LINE> if ( not entry or (entry and (entry.count or 0) < cap.config.IPBAN_COUNT) and self._test_blocked(url, ip=ip) ): <NEW_LINE> <INDENT> self.block([ip], url=url) <NEW_LINE> return True <NEW_LINE> <DEDENT> if not timestamp or (timestamp and timestamp > datetime.now()): <NEW_LINE> <INDENT> timestamp = datetime.now() <NEW_LINE> <DEDENT> if entry: <NEW_LINE> <INDENT> entry.timestamp = timestamp <NEW_LINE> count = entry.count = entry.count + 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> count = 1 <NEW_LINE> self._ip_banned[ip] = ObjectDict(timestamp=timestamp, count=count, url=url) <NEW_LINE> <DEDENT> cap.logger.info("%s %s added/updated ban list. Count: %d", ip, url, count) <NEW_LINE> return True
Increment the ban count for the ip of the current request in the banned list. :param ip: optional ip to add (ip ban will by default use current ip) :param url: optional url to display/store :param timestamp: entry time to set :return: True if entry added/updated
625941b9be383301e01b5315
def monkey_patch_sqlalchemy(): <NEW_LINE> <INDENT> from sqlalchemy.orm import scoped_session <NEW_LINE> scoped_session.original_remove = scoped_session.remove <NEW_LINE> scoped_session.remove = lambda self: None <NEW_LINE> scoped_session.original_commit = scoped_session.commit <NEW_LINE> scoped_session.commit = scoped_session.flush
Replaces the SQLAlchemy `session.remove` method with a NOP. This avoids that any uncommited data is removed from session during the teardown hooks. You want to keep the data on session during the tests otherwise you won't be able to run any assertion on the database. Also replaces the `session.commit` with the `session.flush`. This is done for performance reasons and to avoid recreating the database on every test.
625941b945492302aab5e148
def print_results(packages, paths, branches, statuses, sorting=''): <NEW_LINE> <INDENT> packages = np.array(packages) <NEW_LINE> paths = np.array(paths) <NEW_LINE> branches = np.array(branches) <NEW_LINE> statuses = np.array(statuses) <NEW_LINE> if sorting == '' or sorting == 'p': <NEW_LINE> <INDENT> inds = packages.argsort() <NEW_LINE> <DEDENT> elif sorting == 't': <NEW_LINE> <INDENT> inds = branches.argsort() <NEW_LINE> <DEDENT> elif sorting == 's': <NEW_LINE> <INDENT> inds = statuses.argsort()[::-1] <NEW_LINE> <DEDENT> paddings = [] <NEW_LINE> paddings.append(max([len(x) for x in packages]) + 2) <NEW_LINE> paddings.append(max([len(x) for x in paths]) + 2) <NEW_LINE> paddings.append(max([len(x) for x in branches]) + 3) <NEW_LINE> for package, path, branch, status in zip(packages[inds], paths[inds], branches[inds], statuses[inds]): <NEW_LINE> <INDENT> print(f"{package:{paddings[0]}} {path:{paddings[1]}} {branch:{paddings[2]}} {status}")
Print the results. Default sorting is alphabetical by package name (also 'p'). Use 't' to sort by ticket. Use 's' to sort by status.
625941b945492302aab5e149
def empty_object(self): <NEW_LINE> <INDENT> return Citation()
Return an empty Citation object for comparison for changes. It is used by the base class L{EditPrimary}.
625941b967a9b606de4a7d45
def go_to(self, position_to_go, speed=0.05, relative=True): <NEW_LINE> <INDENT> if self._use_u3: <NEW_LINE> <INDENT> if relative: <NEW_LINE> <INDENT> displacement = position_to_go - self.get_relative_position() <NEW_LINE> sign = np.sign(displacement) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise NotImplementedError <NEW_LINE> <DEDENT> if sign > 0: <NEW_LINE> <INDENT> value = 5.0 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> value = 0.0 <NEW_LINE> <DEDENT> self.u3.writeRegister(5000, value) <NEW_LINE> <DEDENT> threads = [] <NEW_LINE> for traverse in self.traverses: <NEW_LINE> <INDENT> thread = Thread( target=traverse.go_to, args=(position_to_go,), kwargs={"speed": speed, "relative": relative}, ) <NEW_LINE> threads.append(thread) <NEW_LINE> <DEDENT> for thread in threads: <NEW_LINE> <INDENT> thread.start() <NEW_LINE> <DEDENT> for thread in threads: <NEW_LINE> <INDENT> thread.join() <NEW_LINE> <DEDENT> self.u3.writeRegister(5000, volt_no_movement)
Go to a position. Parameters ---------- position_to_go : float Position in m. speed : float Translation velocity (m/s, positive). relative : {True, bool} Absolute or relative position.
625941b992d797404e304012
def setTransformationElement(self, *args): <NEW_LINE> <INDENT> return _CsoundAC.MCRM_setTransformationElement(self, *args)
setTransformationElement(MCRM self, size_t index, size_t row, size_t column, double value)
625941b97d847024c06be149
def find_equivalence_relation_from_mapping(f): <NEW_LINE> <INDENT> p = find_partition_from_mapping(f) <NEW_LINE> return find_equivalence_relation_from_partition(p)
Given a mapping, return the corresponding equivalence relation
625941b96e29344779a6249e
def create_ggn_kc_conn(fd, kc_name_sec_dict, ggn_name_sec_dict, path=ggn_kc_syn_path, config=None): <NEW_LINE> <INDENT> global model_dict <NEW_LINE> cfg.logger.info('Started GGN->KC connection') <NEW_LINE> model_dict['ggn_kc_conn'] = [] <NEW_LINE> syn_dict = {} <NEW_LINE> syn_data = fd[path] <NEW_LINE> if len(syn_data.shape) == 2: <NEW_LINE> <INDENT> syn_data = syn_data[:, 0] <NEW_LINE> <DEDENT> for row in syn_data: <NEW_LINE> <INDENT> kc = kc_name_sec_dict[row['post']] <NEW_LINE> syn = h.GradedSyn(kc(row['postpos'])) <NEW_LINE> syn.vmid = row['vmid'] <NEW_LINE> syn.vslope = row['vslope'] <NEW_LINE> syn.e = row['e'] <NEW_LINE> syn.gbar = row['gbar'] <NEW_LINE> syn.tau = row['tau'] <NEW_LINE> h.setpointer(ggn_name_sec_dict[row['pre']](row['prepos'])._ref_v, 'vpre', syn) <NEW_LINE> model_dict['ggn_kc_conn'].append({'pre': row['pre'], 'prepos': row['prepos'], 'post': row['post'], 'postpos': row['postpos'], 'vmid': syn.vmid, 'vslope': syn.vslope, 'e': syn.e, 'gbar': syn.gbar, 'tau': syn.tau}) <NEW_LINE> syn_dict[row['post']] = syn <NEW_LINE> <DEDENT> model_dict['ggn_kc_syn'] = syn_dict <NEW_LINE> cfg.logger.info('Finished GGN->KC connection') <NEW_LINE> return syn_dict
Create graded synapses from GGN sections to KC based on synaptic connection data in fd at path `ggn_kc_syn_path`.
625941b9ec188e330fd5a62e
def place_widget(self, widget, x, y, index=0, target=None): <NEW_LINE> <INDENT> local_x, local_y = self.to_local(x, y) <NEW_LINE> if not target: <NEW_LINE> <INDENT> target = self.find_target(local_x, local_y, self.root, widget) <NEW_LINE> <DEDENT> if not self.from_drag: <NEW_LINE> <INDENT> self.add_widget_to_parent(widget, target) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> extra_args = {'x': x, 'y': y, 'index': index} <NEW_LINE> self.add_widget_to_parent(widget, target, from_kv=True, from_undo=True, extra_args=extra_args)
This function is used to first determine the target where to add the widget. Then it adds that widget. :param target: where this widget should be added. If None, coordinates will be used to locate the target :param index: index used in add_widget :param x: widget position x :param y: widget position y :param widget: widget to add
625941b97b180e01f3dc468d
def __boolrel(self): <NEW_LINE> <INDENT> types = [token.EQUAL, token.LESS_THAN, token.LESS_THAN_EQUAL] <NEW_LINE> types.extend([token.GREATER_THAN_EQUAL, token.GREATER_THAN]) <NEW_LINE> types.append(token.NOT_EQUAL) <NEW_LINE> if self.current_token.tokentype in types: <NEW_LINE> <INDENT> temp_token = self.current_token <NEW_LINE> self.__advance() <NEW_LINE> return temp_token <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> s = 'expected a conditional operator, found "' + self.current_token.lexeme + '" in parser' <NEW_LINE> l = self.current_token.line <NEW_LINE> c = self.current_token.column <NEW_LINE> raise error.MyPLError(s, l, c)
<boolrel> ::= EQUAL | LESS_THAN | GREATER_THAN | LESS_THAN_EQUAL | GREATER_THAN_EQUAL | NOT_EQUAL
625941b9dd821e528d63b034
def delta_calc(airtemp): <NEW_LINE> <INDENT> l = sp.size(airtemp) <NEW_LINE> if l < 2: <NEW_LINE> <INDENT> temp = airtemp + 237.3 <NEW_LINE> b = 0.6108*(math.exp((17.27*airtemp)/temp)) <NEW_LINE> delta = (4098*b)/(temp**2) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> delta = sp.zeros(l) <NEW_LINE> for i in range(0, l): <NEW_LINE> <INDENT> temp = airtemp[i] + 237.3 <NEW_LINE> b = 0.6108*(math.exp((17.27*airtemp[i])/temp)) <NEW_LINE> delta[i] = (4098*b)/(temp**2) <NEW_LINE> <DEDENT> <DEDENT> return delta
Calculates slope of saturation vapour pressure curve at air temperature [kPa/Celsius] http://www.fao.org/docrep/x0490e/x0490e07.htm :param airtemp: Temperature in Celsius :return: slope of saturation vapour pressure curve [kPa/Celsius]
625941b9f8510a7c17cf958d
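A worked check of the delta_calc entry above against the FAO-56 reference in its docstring (scipy as sp and math are assumed imported as in the row):

# at 25 degrees Celsius:
#   es    = 0.6108 * exp(17.27 * 25 / 262.3)  ~= 3.17 kPa
#   delta = 4098 * es / 262.3**2              ~= 0.189 kPa/degC (FAO-56 table value)
round(delta_calc(25.0), 3)    # -> 0.189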
def test_new_empty(self): <NEW_LINE> <INDENT> assert DSdecimal('') == '' <NEW_LINE> assert DSdecimal(' ') == ' ' <NEW_LINE> assert DSdecimal(None) is None
Test passing an empty value.
625941b930bbd722463cbc4b
def J_adjoint_standard(model, src_coords, wavelet, rec_coords, recin, space_order=8, is_residual=False, return_obj=False, born_fwd=False, isic=False, ws=None, t_sub=1, nlind=False): <NEW_LINE> <INDENT> rec, u, _ = op_fwd_J[born_fwd](model, src_coords, rec_coords, wavelet, save=True, ws=ws, space_order=space_order, isic=isic, t_sub=t_sub, nlind=nlind) <NEW_LINE> if not is_residual: <NEW_LINE> <INDENT> if nlind: <NEW_LINE> <INDENT> recin[:] = rec[0].data[:] - (recin[:] - rec[1].data) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> recin[:] = rec.data[:] - recin[:] <NEW_LINE> <DEDENT> <DEDENT> g, _ = gradient(model, recin, rec_coords, u, space_order=space_order, isic=isic) <NEW_LINE> if return_obj: <NEW_LINE> <INDENT> return .5*model.critical_dt*np.linalg.norm(recin)**2, g.data <NEW_LINE> <DEDENT> return g.data
Adjoint Jacobian (adjoint of Born modeling operator) operator on a shot record as a source (i.e. data residual). Outputs the gradient with standard zero lag cross correlation over time. Parameters ---------- model: Model Physical model src_coords: Array Coordinates of the source(s) wavelet: Array Source signature rec_coords: Array Coordinates of the receiver(s) recin: Array Receiver data space_order: Int (optional) Spatial discretization order, defaults to 8 isic : Bool Whether or not to use ISIC imaging condition ws : Array Extended source spatial distribution is_residual: Bool Whether to treat the input as the residual or as the observed data born_fwd: Bool Whether to use the forward or linearized forward modeling operator nlind: Bool Whether to remove the non-linear data from the input data. This option is only available in combination with `born_fwd` Returns ---------- Array Adjoint jacobian on the input data (gradient)
625941b98e71fb1e9831d636
def __init__(self, parameters, host_conn): <NEW_LINE> <INDENT> super().__init__(parameters, host_conn) <NEW_LINE> self._logger = get_logger(__name__) <NEW_LINE> self._lun = '0x{}'.format(self._parameters.get("volume_id")) <NEW_LINE> self._multipath = self._parameters["specs"].get("multipath", False) <NEW_LINE> has_path = False <NEW_LINE> self._adapters = deepcopy(self._parameters["specs"]["adapters"]) <NEW_LINE> for adapter in self._adapters: <NEW_LINE> <INDENT> for i in range(0, len(adapter.get('wwpns', []))): <NEW_LINE> <INDENT> has_path = True <NEW_LINE> adapter['wwpns'][i] = '0x{}'.format(adapter['wwpns'][i]) <NEW_LINE> <DEDENT> <DEDENT> if not has_path: <NEW_LINE> <INDENT> raise ValueError( 'No FCP path defined for disk LUN {}'.format(self._lun)) <NEW_LINE> <DEDENT> self._logger.debug("Creating DiskFcp " "lun=%s adapters=%s", self._lun, self._adapters)
Constructor Args: parameters (dict): Disk parameters as defined in the json schema. host_conn (GuestLinux): instance connected to linux host Raises: ValueError: in case no fcp path is provided
625941b94c3428357757c1b3
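A minimal sketch of the parameters dict this constructor expects; the key names are taken from the accesses in the code above, while the concrete values (and the 'devno' field) are purely illustrative:

params = {
    "volume_id": "1022400000000000",             # becomes LUN 0x1022400000000000
    "specs": {
        "multipath": True,
        "adapters": [
            {"devno": "0.0.1800",                # hypothetical extra adapter field
             "wwpns": ["500507630b102208"]},     # each WWPN gets an 0x prefix
        ],
    },
}
disk = DiskFcp(params, host_conn)                # host_conn: a connected GuestLinux instance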
def to_dict(self): <NEW_LINE> <INDENT> result = {} <NEW_LINE> for attr, _ in six.iteritems(self.swagger_types): <NEW_LINE> <INDENT> value = getattr(self, attr) <NEW_LINE> if isinstance(value, list): <NEW_LINE> <INDENT> result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) <NEW_LINE> <DEDENT> elif hasattr(value, "to_dict"): <NEW_LINE> <INDENT> result[attr] = value.to_dict() <NEW_LINE> <DEDENT> elif isinstance(value, dict): <NEW_LINE> <INDENT> result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> result[attr] = value <NEW_LINE> <DEDENT> <DEDENT> if issubclass(NestedVRF, dict): <NEW_LINE> <INDENT> for key, value in self.items(): <NEW_LINE> <INDENT> result[key] = value <NEW_LINE> <DEDENT> <DEDENT> return result
Returns the model properties as a dict
625941b90a366e3fb873e6a0
def value_trade(board): <NEW_LINE> <INDENT> attackers = [] <NEW_LINE> for ally in board.ally_minions: <NEW_LINE> <INDENT> if ally.attack < 5 and not ally.exhausted: <NEW_LINE> <INDENT> attackers.append(ally) <NEW_LINE> <DEDENT> <DEDENT> enemies = board.enemy_minions <NEW_LINE> value_thresh = 5 <NEW_LINE> trades = [] <NEW_LINE> for ally in attackers: <NEW_LINE> <INDENT> for enemy in enemies: <NEW_LINE> <INDENT> v_enemy = enemy.health + enemy.attack <NEW_LINE> v_ally = ally.health + ally.attack <NEW_LINE> if ally.attack > enemy.health and v_enemy - v_ally > value_thresh: <NEW_LINE> <INDENT> trades.append((ally.position, enemy.position)) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return trades
Value trades, e.g. a 2/1 into a 6/2 (both die, but the 2/1 is worth less) or a 4/5 into a 3/2.
625941b9460517430c394016
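A minimal sketch of the board objects the function expects; Minion and Board are stand-ins for the real game classes, carrying only the attributes the code reads:

from collections import namedtuple

Minion = namedtuple("Minion", "attack health exhausted position")
Board = namedtuple("Board", "ally_minions enemy_minions")

board = Board(
    ally_minions=[Minion(attack=2, health=1, exhausted=False, position=0)],
    enemy_minions=[Minion(attack=8, health=1, exhausted=False, position=0)],
)
print(value_trade(board))   # [(0, 0)] -- the 2/1 kills the 8/1, a clear value trade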
def droplet_enable_ipv6(self, droplet): <NEW_LINE> <INDENT> self.simple_droplet_action('enable_ipv6', droplet)
Send an API request to enable IPv6 on a droplet. >>> cli = batfish.Client() >>> cli.droplet_enable_ipv6(123456) {'response': "Nothing", 'reason': "Meh."} :param droplet: The droplet to modify, either a droplet ID or an instance of `batfish.models.Droplet`. :rtype: Dictionary of the JSON response.
625941b923849d37ff7b2f1a
def slider_particle_mass(self): <NEW_LINE> <INDENT> sld = QSlider(Qt.Horizontal, self) <NEW_LINE> sld.setFocusPolicy(Qt.NoFocus) <NEW_LINE> sld.setGeometry(85, 180, 100, 30) <NEW_LINE> sld.setMinimum(1) <NEW_LINE> sld.setMaximum(1000) <NEW_LINE> sld.setValue(self.mass / 5000) <NEW_LINE> sld.valueChanged[int].connect(self.__sld_changed)
Init function for slider that changes mass of the particle that will be created next
625941b93317a56b86939af2
def exchange_code(authorization_code): <NEW_LINE> <INDENT> flow = flow_from_clientsecrets(CLIENTSECRETS_LOCATION, ' '.join(SCOPES)) <NEW_LINE> try: <NEW_LINE> <INDENT> credentials = flow.step2_exchange(authorization_code) <NEW_LINE> return credentials <NEW_LINE> <DEDENT> except FlowExchangeError as error: <NEW_LINE> <INDENT> logging.error('An error occurred: %s', error) <NEW_LINE> raise CodeExchangeException(None)
Exchange an authorization code for OAuth 2.0 credentials. Args: authorization_code: Authorization code to exchange for OAuth 2.0 credentials. Returns: oauth2client.client.OAuth2Credentials instance. Raises: CodeExchangeException: an error occurred.
625941b9c432627299f04acd
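Typical follow-up once the consent screen redirects back with a code (a sketch; how authorization_code is read from the redirect is framework-specific and assumed here):

import httplib2

credentials = exchange_code(authorization_code)             # oauth2client OAuth2Credentials
authorized_http = credentials.authorize(httplib2.Http())    # signs subsequent Google API requests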
def OpenStream(self, request_iterator, context): <NEW_LINE> <INDENT> context.set_code(grpc.StatusCode.UNIMPLEMENTED) <NEW_LINE> context.set_details('Method not implemented!') <NEW_LINE> raise NotImplementedError('Method not implemented!')
Opens a Query- and Instruction stream to AxonServer.
625941b921a7993f00bc7b73
def search(self, term): <NEW_LINE> <INDENT> if self.isRunning(): <NEW_LINE> <INDENT> raise RuntimeError("A search is already being made") <NEW_LINE> <DEDENT> self.setProxy() <NEW_LINE> self.term = term <NEW_LINE> self.start()
Performs a lookup in Github REST API based on term
625941b9a4f1c619b28afeca
def _iter(self): <NEW_LINE> <INDENT> get_weight = (self.transformer_weights or {}).get <NEW_LINE> return ((name, trans, get_weight(name)) for name, trans in self.transformer_list if trans != 'drop')
Generate (name, trans, weight) tuples excluding None and 'drop' transformers.
625941b99f2886367277a71a
def _get_template_for_given_resolution(self, res, return_): <NEW_LINE> <INDENT> path = self._layer_files[self._res_indices[res][0]] <NEW_LINE> if return_ == "path": <NEW_LINE> <INDENT> return_value = path <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> with rasterio.open(str(path)) as src: <NEW_LINE> <INDENT> if return_ == "meta": <NEW_LINE> <INDENT> return_value = src.meta <NEW_LINE> <DEDENT> elif return_ == "windows": <NEW_LINE> <INDENT> return_value = tuple(src.block_windows()) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError("'return_' must be 'path', meta' or 'windows'.") <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return return_value
Given specified resolution ('res') return template layer 'path', 'meta' or 'windows'.
625941b94c3428357757c1b4
def test_fullResponse(self): <NEW_LINE> <INDENT> data = { "todoItems": [ { "description": "Eat cookies", "dueDate": datetime.now(amp.utc), "completed": True }, { "description": "Be happy", "dueDate": datetime.now(amp.utc), "completed": False } ] } <NEW_LINE> self.assertWellFormedResponse(data)
Tests that a response with some to-do items is valid.
625941b9cb5e8a47e48b7938
def get_zaxis(self): <NEW_LINE> <INDENT> return self.zaxis
Return the ``ZAxis`` (`~.axis3d.Axis`) instance.
625941b9462c4b4f79d1d559
def loc2bbox(src_bbox, loc): <NEW_LINE> <INDENT> if src_bbox.shape[0] == 0: <NEW_LINE> <INDENT> return np.zeros((0, 4), dtype=loc.dtype) <NEW_LINE> <DEDENT> src_bbox = src_bbox.astype(src_bbox.dtype, copy=False) <NEW_LINE> src_height = src_bbox[:, 2] - src_bbox[:, 0] <NEW_LINE> src_width = src_bbox[:, 3] - src_bbox[:, 1] <NEW_LINE> src_ctr_y = src_bbox[:, 0] + 0.5 * src_height <NEW_LINE> src_ctr_x = src_bbox[:, 1] + 0.5 * src_width <NEW_LINE> dy = loc[:, 0::4] <NEW_LINE> dx = loc[:, 1::4] <NEW_LINE> dh = loc[:, 2::4] <NEW_LINE> dw = loc[:, 3::4] <NEW_LINE> ctr_y = dy * src_height[:, np.newaxis] + src_ctr_y[:, np.newaxis] <NEW_LINE> ctr_x = dx * src_width[:, np.newaxis] + src_ctr_x[:, np.newaxis] <NEW_LINE> with warnings.catch_warnings(record=True) as w_m: <NEW_LINE> <INDENT> warnings.simplefilter("always") <NEW_LINE> h = np.exp(dh) * src_height[:, np.newaxis] <NEW_LINE> w = np.exp(dw) * src_width[:, np.newaxis] <NEW_LINE> if len(w_m): <NEW_LINE> <INDENT> print(w_m[0].message) <NEW_LINE> <DEDENT> <DEDENT> dst_bbox = np.zeros(loc.shape, dtype=loc.dtype) <NEW_LINE> dst_bbox[:, 0::4] = ctr_y - 0.5 * h <NEW_LINE> dst_bbox[:, 1::4] = ctr_x - 0.5 * w <NEW_LINE> dst_bbox[:, 2::4] = ctr_y + 0.5 * h <NEW_LINE> dst_bbox[:, 3::4] = ctr_x + 0.5 * w <NEW_LINE> return dst_bbox
Decode bounding boxes from bounding box offsets and scales. Given bounding box offsets and scales computed by :meth:`bbox2loc`, this function decodes the representation into coordinates in the 2D image. Given scales and offsets :math:`t_y, t_x, t_h, t_w` and a bounding box whose center is :math:`(y, x) = p_y, p_x` and size :math:`p_h, p_w`, the decoded bounding box's center :math:`\hat{g}_y`, :math:`\hat{g}_x` and size :math:`\hat{g}_h`, :math:`\hat{g}_w` are calculated by the following formulas. * :math:`\hat{g}_y = p_h t_y + p_y` * :math:`\hat{g}_x = p_w t_x + p_x` * :math:`\hat{g}_h = p_h \exp(t_h)` * :math:`\hat{g}_w = p_w \exp(t_w)` The decoding formulas are used in works such as R-CNN [#]_. The output has the same type as the inputs. .. [#] Ross Girshick, Jeff Donahue, Trevor Darrell, Jitendra Malik. Rich feature hierarchies for accurate object detection and semantic segmentation. CVPR 2014. Args: src_bbox (array): Coordinates of bounding boxes. Its shape is :math:`(R, 4)`. These coordinates are :math:`p_{ymin}, p_{xmin}, p_{ymax}, p_{xmax}`. loc (array): An array with offsets and scales. The shapes of :obj:`src_bbox` and :obj:`loc` should be the same. This contains the values :math:`t_y, t_x, t_h, t_w`. Returns: array: Decoded bounding box coordinates. Its shape is :math:`(R, 4)`. The second axis contains the four values :math:`\hat{g}_{ymin}, \hat{g}_{xmin}, \hat{g}_{ymax}, \hat{g}_{xmax}`.
625941b9ec188e330fd5a62f
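A worked example of the decoding formulas above:

import numpy as np

src_bbox = np.array([[0., 0., 10., 10.]])   # one 10x10 box centred at (5, 5)
loc = np.array([[0.1, 0.2, 0.0, 0.0]])      # shift the centre by (1, 2), keep the size
print(loc2bbox(src_bbox, loc))              # [[ 1.  2. 11. 12.]]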
def apply_B(f): <NEW_LINE> <INDENT> N = f.shape[0] - 1 <NEW_LINE> Bf = np.zeros((N-1, N-1), dtype=complex) <NEW_LINE> for i, j in it.product(range(1, N), repeat=2): <NEW_LINE> <INDENT> Bf[i-1, j-1] = f[i, j] + 1/12 * ( f[i+1, j] - 2*f[i, j] + f[i-1, j] + f[i, j+1] - 2*f[i, j] + f[i, j-1] ) <NEW_LINE> <DEDENT> return Bf
Input: f -- shape (N+1, N+1) Output: Bf -- shape (N-1, N-1)
625941b9460517430c394017
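Written out, the stencil the loop applies at each interior grid point is

(Bf)_{i,j} = f_{i,j} + \frac{1}{12}\left(f_{i+1,j} + f_{i-1,j} + f_{i,j+1} + f_{i,j-1} - 4 f_{i,j}\right), \qquad 1 \le i, j \le N-1,

i.e. the identity plus one twelfth of the (unscaled) five-point discrete Laplacian.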
def dataset_fixed_cov(n = 300, dim = 2): <NEW_LINE> <INDENT> np.random.seed(42) <NEW_LINE> C = np.array([[0., -0.23], [0.83, .23]]) <NEW_LINE> X = np.r_[np.dot(np.random.randn(n, dim), C), np.dot(np.random.randn(n, dim), C) + np.array([1, 1])] <NEW_LINE> Y = np.hstack((np.zeros(n), np.ones(n))).astype(int) <NEW_LINE> return X, Y
Generates sample data drawn from a two-dimensional Gaussian distribution (two classes sharing the same covariance).
625941b90fa83653e4656e46
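Example usage; note that dim is effectively fixed at 2 because the covariance factor C is a 2x2 matrix:

X, Y = dataset_fixed_cov(n=300)   # X: (600, 2) samples, Y: (600,) labels -- 300 zeros then 300 ones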
def test_create_user_with_email_successful(self): <NEW_LINE> <INDENT> email = '[email protected]' <NEW_LINE> password = 'testpass123' <NEW_LINE> user = get_user_model().objects.create_user( email=email, password=password ) <NEW_LINE> self.assertEqual(user.email, email) <NEW_LINE> self.assertTrue(user.check_password(password))
Test creating a new user with an email is successful
625941b9b830903b967e97a0
def main(args): <NEW_LINE> <INDENT> subcommands = dict( add=add, modify=modify, delete=delete, list=list_jobs, merge=merge, daemon=daemon, update=update, ) <NEW_LINE> (subcommand, options) = option_parser(subcommands.keys(), args) <NEW_LINE> if subcommand: <NEW_LINE> <INDENT> if subcommand in subcommands: <NEW_LINE> <INDENT> subcommands[subcommand](options) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise job_manager.UserError('subcommand not recognised: %s' % (subcommand)) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> raise job_manager.UserError('No subcommand supplied')
Wrapper around a JobCache instance providing a command-line interface. args: list of arguments. For full usage, see top-level __doc__.
625941b94f6381625f1148cf
def checkcron(command, crontab): <NEW_LINE> <INDENT> for line in crontab.split('\n'): <NEW_LINE> <INDENT> if command in line: return True, line <NEW_LINE> <DEDENT> return False, "command not found"
Looks through the crontab and returns (True, matching line) if the command is found, otherwise (False, "command not found").
625941b9167d2b6e31218a26
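For example:

crontab = "0 2 * * * /usr/local/bin/backup.sh\n30 6 * * 1 /usr/bin/report.py"
found, line = checkcron("backup.sh", crontab)
# found == True, line == "0 2 * * * /usr/local/bin/backup.sh"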
def get_waveforms(self, spike_ids, channel_ids): <NEW_LINE> <INDENT> if self.waveform_loader is None: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> out = self.waveform_loader.get(spike_ids, channel_ids) <NEW_LINE> assert out.dtype in (np.float32, np.float64) <NEW_LINE> assert out.shape[0] == len(spike_ids) <NEW_LINE> assert out.shape[2] == len(channel_ids) <NEW_LINE> return out
Return several waveforms on specified channels.
625941b9a17c0f6771cbdedd
def tracks_w_artist_id(self, artist_id): <NEW_LINE> <INDENT> trks = self.session.get(self.base_url + 'tracks', params={'facets': 'artistId:' + str(artist_id), 'perPage': 150}).json() <NEW_LINE> pages = trks['metadata']['totalPages'] <NEW_LINE> self._setup_progress_bar(pages) <NEW_LINE> trk_dict = {} <NEW_LINE> for i in range(pages): <NEW_LINE> <INDENT> trks = self.session.get(self.base_url + 'tracks', params={'facets': 'artistId:' + str(artist_id), 'perPage': 150, 'page': i + 1}).json() <NEW_LINE> for trk in trks['results']: <NEW_LINE> <INDENT> trk_dict[trk['name']] = trk['id'] <NEW_LINE> <DEDENT> self._update_progress_bar(i + 1) <NEW_LINE> <DEDENT> self._escape_progress_bar() <NEW_LINE> return trk_dict
Find all tracks by an artist using ID INPUT: artist_id - INT OUTPUT: tracks - track name and track id, DICT
625941b976e4537e8c351501
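Hypothetical usage, assuming `client` is an instance of whatever API wrapper class defines this method:

tracks = client.tracks_w_artist_id(12345)   # 12345 is an illustrative artist id
# tracks -> {'Some Track': 987654, 'Another Track': 987655, ...}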