Dataset columns: code (string, lengths 4 to 4.48k), docstring (string, lengths 1 to 6.45k), _id (string, length 24).
def __init__(self, func_dict): <NEW_LINE> <INDENT> self.func_dict = func_dict <NEW_LINE> for key in self.required_func: <NEW_LINE> <INDENT> if key not in func_dict: <NEW_LINE> <INDENT> raise MissedConfigError('miss key {}'.format(key))
Constructor for Flow. :param func_dict: the function and its arguments that will be called in the Flow :type func_dict: dict
625941b85f7d997b871748f9
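A minimal usage sketch for the constructor above, assuming `required_func` is a class attribute listing the keys every Flow needs; the subclass name and the stand-in callables are hypothetical, for illustration only:

fetch_page = lambda url: url          # stand-in callables for the example
parse_page = lambda html: html

class DownloadFlow(Flow):
    # hypothetical subclass: declares which functions func_dict must provide
    required_func = ('fetch', 'parse')

flow = DownloadFlow({'fetch': fetch_page, 'parse': parse_page})  # passes the check
flow = DownloadFlow({'fetch': fetch_page})                       # raises MissedConfigError('miss key parse')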
def new_page_root(self): <NEW_LINE> <INDENT> self.objectCount += 1 <NEW_LINE> root_obj_num = self.objectCount +100 <NEW_LINE> root_ref = IndirectObjectRef(root_obj_num, 0) <NEW_LINE> page_count = 0 <NEW_LINE> kids = [] <NEW_LINE> for pdf in self.pdfFiles: <NEW_LINE> <INDENT> root = pdf.get_page_root() <NEW_LINE> page_count += int(root[b"/Count"]) <NEW_LINE> kids += root[b"/Kids"].data <NEW_LINE> root[b"/Parent"] = root_ref <NEW_LINE> <DEDENT> rootDict = PDFDict({b"/Type": b"/Pages", b"/Kids": PDFArray(kids), b"/Count": str(page_count).encode("utf-8")}) <NEW_LINE> self.pdfFiles[0].trailer[b"/Size"] = str(root_obj_num).encode("utf-8") <NEW_LINE> self.pdfFiles[0].get_document_catalog()[b"/Pages"] = root_ref <NEW_LINE> self.merge_outline() <NEW_LINE> return PDFObject(rootDict,root_obj_num, 0, "n")
Creates a new page root containing all merged pages
625941b821bff66bcd6847b3
def run(self): <NEW_LINE> <INDENT> while True: <NEW_LINE> <INDENT> self.fetch_data() <NEW_LINE> self.retry_fetch_data()
It is the application starter
625941b899fddb7c1c9de1f1
def get_committer(tree: WorkingTree) -> str: <NEW_LINE> <INDENT> if getattr(tree.branch.repository, "_git", None): <NEW_LINE> <INDENT> cs = tree.branch.repository._git.get_config_stack() <NEW_LINE> user = os.environ.get("GIT_COMMITTER_NAME") <NEW_LINE> email = os.environ.get("GIT_COMMITTER_EMAIL") <NEW_LINE> if user is None: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> user = cs.get(("user",), "name").decode("utf-8") <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> user = None <NEW_LINE> <DEDENT> <DEDENT> if email is None: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> email = cs.get(("user",), "email").decode("utf-8") <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> email = None <NEW_LINE> <DEDENT> <DEDENT> if user and email: <NEW_LINE> <INDENT> return user + " <" + email + ">" <NEW_LINE> <DEDENT> from breezy.config import GlobalStack <NEW_LINE> return GlobalStack().get("email") <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> config = tree.branch.get_config_stack() <NEW_LINE> return config.get("email")
Get the committer string for a tree. Args: tree: A Tree object Returns: A committer string
625941b8cc0a2c11143dccf6
def gen_message_id(self): <NEW_LINE> <INDENT> return u"%s:%s" % (self.message_uuid, self.message_counter.__next__())
Generate a unique message id. Since uuid4s take a bit of time to compose, instead we keep a local counter combined with our hive's counter-uuid.
625941b8e5267d203edcdafe
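A sketch of how the two attributes used by gen_message_id might be initialized so that ids come out as '<uuid>:<counter>'; this setup is an assumption, not code from the original class:

import itertools
import uuid

class MessageMixin:
    def __init__(self):
        # one uuid per hive/process, plus a cheap monotonically increasing counter
        self.message_uuid = uuid.uuid4()
        self.message_counter = itertools.count()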
def update_weights(net, input_values, desired_output, neuron_outputs, r=1): <NEW_LINE> <INDENT> deltaB = calculate_deltas(net, desired_output, neuron_outputs) <NEW_LINE> outputs = forward_prop(net, input_values, sigmoid)[1] <NEW_LINE> for w in net.get_wires(): <NEW_LINE> <INDENT> w.set_weight(w.get_weight() + r * node_value(w.startNode, input_values, outputs) * deltaB[w.endNode]) <NEW_LINE> <DEDENT> return net
Performs a single step of back-propagation. Computes delta_B values and weight updates for entire neural net, then updates all weights. Uses the sigmoid function to compute neuron output. Returns the modified neural net, with the updated weights.
625941b830c21e258bdfa2fb
def credit_screen(data): <NEW_LINE> <INDENT> log.info('CREATING ROLLING CREDITS...') <NEW_LINE> data = _credit_string_list(data) <NEW_LINE> credits_total = len(data) <NEW_LINE> count = -1 <NEW_LINE> duration = 120 <NEW_LINE> cmd = ffmpeg.get_command() <NEW_LINE> cmd += ' -f lavfi -i color=c=black:s=1920x1080:r=24:d=' + str(duration) + ' -vf "' <NEW_LINE> draw_txt_cmds = [] <NEW_LINE> for i in range(0, credits_total): <NEW_LINE> <INDENT> count += 1 <NEW_LINE> credit_sub_lines = [] <NEW_LINE> for j in range(0, len(data[i])): <NEW_LINE> <INDENT> count += 1 <NEW_LINE> credit_sub_lines.append(_credit_line(count, data[i][j])) <NEW_LINE> <DEDENT> draw_txt_cmds.append(','.join(credit_sub_lines)) <NEW_LINE> <DEDENT> draw_txt_cmds.append(_credit_line_last(count)) <NEW_LINE> cmd += ','.join(draw_txt_cmds) + '" ' <NEW_LINE> credits_path = settings.DIR_DATA_DATE + 'credits.mp4' <NEW_LINE> cmd += '-an -c:v libx264 -r 24 -preset ultrafast -qp 0 ' + credits_path <NEW_LINE> subprocess.call(cmd.encode('utf8'), shell=True) <NEW_LINE> log.info('ROLLING CREDITS SAVED.') <NEW_LINE> return credits_path
Build ffmpeg command for the rolling credit screen: for each line append a drawtext command @see http://stackoverflow.com/questions/11058479/ffmpeg-how-does-moving-overlay-text-command-work ffmpeg -y -f lavfi -i color=c=black:s=1920x1080:d=10 -vf drawtext="fontfile='/path/font.ttf':text='Rolling':fontsize=20:fontcolor=white:x=(w-text_w)/2:y=h-20*t" output.mp4 :return:
625941b8baa26c4b54cb0f81
def exp_runs_swing(self, outs, balls, strikes, first, second, third, px, pz, pitchType, hand, heatmap): <NEW_LINE> <INDENT> s = self.base_outcomes(outs, balls, strikes, first, second, third, 'S') <NEW_LINE> single = heatmap.prob_single(px, pz, pitchType, hand) * s <NEW_LINE> d = self.base_outcomes(outs, balls, strikes, first, second, third, 'D') <NEW_LINE> double = heatmap.prob_double(px, pz, pitchType, hand) * d <NEW_LINE> t = self.base_outcomes(outs, balls, strikes, first, second, third, 'T') <NEW_LINE> triple = heatmap.prob_double(px, pz, pitchType, hand) * t <NEW_LINE> hr = self.base_outcomes(outs, balls, strikes, first, second, third, 'HR') <NEW_LINE> homer = heatmap.prob_homer(px, pz, pitchType, hand) * hr <NEW_LINE> miss = heatmap.prob_miss(px, pz, pitchType, hand) * self.strike_outcomes(outs, balls, strikes, first, second, third) <NEW_LINE> out = heatmap.prob_out(px, pz, pitchType, hand) * self.out_outcomes(outs, balls, strikes, first, second, third) <NEW_LINE> foul = heatmap.prob_foul(px, pz, pitchType, hand) * self.foul_outcomes(outs, balls, strikes, first, second, third) <NEW_LINE> run_exp = sum([single, double, triple, homer, miss, out, foul]) <NEW_LINE> return run_exp
Returns the expected runs from the inning given the batter swings at the pitch. P(single|swing) * runExp(single) P(double|swing) * runExp(double) P(triple|swing) * runExp(triple) P(homer|swing) * runExp(homer) P(miss|swing) * runExp(miss) P(out|swing) * runExp(out) P(foul|swing) * runExp(foul)
625941b882261d6c526ab301
def configs(config_dirs: list): <NEW_LINE> <INDENT> for randrctl_home in config_dirs: <NEW_LINE> <INDENT> config_file = os.path.join(randrctl_home, CONFIG_NAME) <NEW_LINE> if os.path.isfile(config_file): <NEW_LINE> <INDENT> with open(config_file, 'r') as stream: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> logger.debug("reading configuration from %s", config_file) <NEW_LINE> cfg = load(stream, Loader=yaml.FullLoader) <NEW_LINE> if cfg: <NEW_LINE> <INDENT> yield (randrctl_home, cfg) <NEW_LINE> <DEDENT> <DEDENT> except YAMLError as e: <NEW_LINE> <INDENT> logger.warning("error reading configuration file %s", config_file)
Lazily visits the specified directories and tries to parse a config file in each. If it succeeds, yields a tuple (dir, config), where config is a dict :param config_dirs: list of directories that may contain configs :return: an iterator over tuples (config_dir, parsed_config), an empty iterator if there are no valid configs
625941b8009cb60464c6321a
def __init__(self, path: str, network: str, logfile: str): <NEW_LINE> <INDENT> self._logfile = logfile <NEW_LINE> self._proxy_path = path <NEW_LINE> self._working_dir = os.getcwd() <NEW_LINE> try: <NEW_LINE> <INDENT> shutil.rmtree(self._working_dir + '/state-node-' + network) <NEW_LINE> print('state-node-' + network + ' has been deleted.') <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> print('state-node-' + network + ' not detected.')
Initialize a new cardano-byron-proxy Args: path: a full path to the proxy executable logfile: the name of the logfile (remember to add .txt)
625941b8cc40096d615957b1
def check_definition_by_marker(self, source, after_cursor, names): <NEW_LINE> <INDENT> source = textwrap.dedent(source) <NEW_LINE> for (i, line) in enumerate(source.splitlines()): <NEW_LINE> <INDENT> if after_cursor in line: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> <DEDENT> column = len(line) - len(after_cursor) <NEW_LINE> defs = Script(source, i + 1, column).goto_definitions() <NEW_LINE> assert [d.name for d in defs] == names
Find definitions specified by `after_cursor` and check what is found For example, for the following configuration, you can pass ``after_cursor = 'y)'``.:: function( x, y) \ `- You want cursor to be here
625941b87b180e01f3dc4663
def test_serialize_response_with_unknown_error(): <NEW_LINE> <INDENT> response_factory = ResponseFactory() <NEW_LINE> error = ValueError("TestError") <NEW_LINE> def _serializer(response_type, response): <NEW_LINE> <INDENT> raise error <NEW_LINE> <DEDENT> response_type = "Test" <NEW_LINE> response_factory.register_response( response_type=response_type, parser=None, serializer=_serializer, ) <NEW_LINE> response = ResponseClass(response_type=response_type, parameters={}) <NEW_LINE> with pytest.raises(ResponseError) as exception_info: <NEW_LINE> <INDENT> response_factory.serialize_response(response=response) <NEW_LINE> <DEDENT> actual_error = exception_info.value <NEW_LINE> assert not isinstance(actual_error, ResponseSerializeError), "Wrong type of the error" <NEW_LINE> assert actual_error.args[0] == "Failed to serialize a response", "Wrong error message" <NEW_LINE> assert actual_error.__cause__ is error, "Wrong reason"
Check that ResponseError is raised if an unhandled exception is raised by a serializer. 1. Create a response factory. 2. Register a new response type with a serializer raising an error different from ResponseSerializeError. 3. Try to serialize a response. 4. Check that ResponseError is raised.
625941b83539df3088e2e1a9
@receiver(score_reset) <NEW_LINE> def submissions_score_reset_handler(sender, **kwargs): <NEW_LINE> <INDENT> course_id = kwargs.get('course_id', None) <NEW_LINE> usage_id = kwargs.get('item_id', None) <NEW_LINE> user = None <NEW_LINE> if 'anonymous_user_id' in kwargs: <NEW_LINE> <INDENT> user = user_by_anonymous_id(kwargs.get('anonymous_user_id')) <NEW_LINE> <DEDENT> if all((user, course_id, usage_id)): <NEW_LINE> <INDENT> SCORE_CHANGED.send( sender=None, points_possible=0, points_earned=0, user_id=user.id, course_id=course_id, usage_id=usage_id ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> log.exception( u"Failed to process score_reset signal from Submissions API. " "user: %s, course_id: %s, usage_id: %s", user, course_id, usage_id )
Consume the score_reset signal defined in the Submissions API, and convert it to a SCORE_CHANGED signal indicating that the score has been set to 0/0. Converts the unicode keys for user, course and item into the standard representation for the SCORE_CHANGED signal. This method expects that the kwargs dictionary will contain the following entries (See the definition of score_reset): - 'anonymous_user_id': unicode, - 'course_id': unicode, - 'item_id': unicode
625941b8d18da76e23532330
def __addDataFiles( self, lfns, connection = False ): <NEW_LINE> <INDENT> res = self.__getFileIDsForLfns( lfns, connection = connection ) <NEW_LINE> if not res['OK']: <NEW_LINE> <INDENT> return res <NEW_LINE> <DEDENT> _fileIDs, lfnFileIDs = res['Value'] <NEW_LINE> for lfn in lfns: <NEW_LINE> <INDENT> if lfn not in lfnFileIDs: <NEW_LINE> <INDENT> req = "INSERT INTO DataFiles (LFN,Status) VALUES ('%s','New');" % lfn <NEW_LINE> res = self._update( req, connection ) <NEW_LINE> if not res['OK']: <NEW_LINE> <INDENT> return res <NEW_LINE> <DEDENT> lfnFileIDs[lfn] = res['lastRowId'] <NEW_LINE> <DEDENT> <DEDENT> return S_OK( lfnFileIDs )
Add a file to the DataFiles table and retrieve the FileIDs
625941b826238365f5f0ecc8
def openProjectActionSlot(self): <NEW_LINE> <INDENT> path, _ = QtWidgets.QFileDialog.getOpenFileName(self, "Open project", self.projectsDirPath(), "All files (*.*);;GNS3 Project (*.gns3);;GNS3 Portable Project (*.gns3project *.gns3p);;NET files (*.net)", "GNS3 Project (*.gns3)") <NEW_LINE> if path: <NEW_LINE> <INDENT> self.loadPath(path)
Slot called to open a project.
625941b86fece00bbac2d599
def test_upload_source_translations_locale_path_default(self): <NEW_LINE> <INDENT> self.mock_glob.return_value = [] <NEW_LINE> try: <NEW_LINE> <INDENT> self.api.upload_source_translations(project_slug='aaa') <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> self.mock_glob.assert_called_with('./locale/en-gb/LC_MESSAGES/*.po')
Ensure that `upload_source_translations` uses the default locale path.
625941b88a43f66fc4b53ec7
def wait_to_reach_target(self, extra_timeout=1.0): <NEW_LINE> <INDENT> waiting = True <NEW_LINE> now = time.time() <NEW_LINE> while (waiting): <NEW_LINE> <INDENT> if self.at_target(): <NEW_LINE> <INDENT> waiting = False <NEW_LINE> <DEDENT> rem_time = time.time() - self.time_at_target <NEW_LINE> if (rem_time > extra_timeout): <NEW_LINE> <INDENT> print("Timed out while waiting for motor to reach the target position") <NEW_LINE> waiting = False <NEW_LINE> return (False) <NEW_LINE> <DEDENT> print("Pos = %06.2f Time to go: %06.2f" % (math.degrees(self.get_encoder_angle()), -rem_time)) <NEW_LINE> time.sleep(0.1) <NEW_LINE> <DEDENT> return True
Block until the target position is reached. Takes the predicted time to reach the target and adds an extra_timeout
625941b8293b9510aa2c30f7
def polynomial(terms, x): <NEW_LINE> <INDENT> apolyfunc = np.polynomial.Polynomial(terms) <NEW_LINE> return apolyfunc(x)
Evaluate a simple polynomial. Where: terms[0] is constant, terms[1] is for x, terms[2] is for x^2, etc. Arguments: - `terms` : sequence of coefficients - `x` : variable value Results: - value of the polynomial Examples: >>> t = 4.1 >>> polynomial((1.1, -3.2, 3.3, 4.5), t) 353.59749999999997 returns the value of: 1.1 - 3.2 * t + 3.3 * t^2 + 4.5 * t^3
625941b86e29344779a62473
def set_Limit(self, value): <NEW_LINE> <INDENT> InputSet._set_input(self, 'Limit', value)
Set the value of the Limit input for this Choreo. ((optional, integer) The number of results to return, up to 500.)
625941b87b180e01f3dc4664
def read_char(self): <NEW_LINE> <INDENT> return struct.unpack("b", self._read(1))[0]
Reads a C{char} from the stream.
625941b8e1aae11d1e749b12
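For reference, the struct.unpack call above decodes a single signed byte ('b' format); a standalone illustration:

import struct

struct.unpack("b", b"\x05")[0]   # 5
struct.unpack("b", b"\xfe")[0]   # -2, since 'b' is a signed char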
def set(self, section, item, value): <NEW_LINE> <INDENT> if value is None: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> self.data[section][item] = value <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> self.data[section] = {item: value}
Set preference value. :param string section: category in which the item is defined. :param string item: item to set. :param value: value to set.
625941b88e7ae83300e4ae2a
def kurtosis(tss): <NEW_LINE> <INDENT> b = ctypes.c_void_p(0) <NEW_LINE> error_code = ctypes.c_int(0) <NEW_LINE> error_message = ctypes.create_string_buffer(KHIVA_ERROR_LENGTH) <NEW_LINE> KhivaLibrary().c_khiva_library.kurtosis_statistics(ctypes.pointer(tss.arr_reference), ctypes.pointer(b), ctypes.pointer(error_code), error_message) <NEW_LINE> if error_code.value != 0: <NEW_LINE> <INDENT> raise Exception(str(error_message.value.decode())) <NEW_LINE> <DEDENT> return Array(array_reference=b)
Returns the kurtosis of tss (calculated with the adjusted Fisher-Pearson standardized moment coefficient G2). :param tss: Expects an input array whose dimension zero is the length of the time series (all the same) and dimension one indicates the number of time series. :return: The kurtosis of tss.
625941b8711fe17d825421d1
def test_facility_update_doesnt_change_creator(self): <NEW_LINE> <INDENT> facility = self.clone_object(self.facility) <NEW_LINE> facility.created_by = self.user <NEW_LINE> facility.save() <NEW_LINE> original_creator = facility.created_by <NEW_LINE> self.client.force_login(self.super_user) <NEW_LINE> response = self.client.put(self.get_url(str(facility.external_id)), self.facility_data, format="json") <NEW_LINE> self.assertEqual(response.status_code, status.HTTP_200_OK) <NEW_LINE> facility.refresh_from_db() <NEW_LINE> self.assertEqual(facility.created_by, original_creator)
Test that updating a facility attribute doesn't change its creator
625941b88a43f66fc4b53ec8
def test_resolve_rooms_id_floor_by_floor_easyroom(self): <NEW_LINE> <INDENT> floor = self.db_building["dxf"]["floors"][0] <NEW_LINE> DXFRoomIdsResolver.resolve_rooms_id( self.building, floor, "easyroom" ) <NEW_LINE> self.assertEqual(floor["rooms"]["R003"], self.final_rooms["R003"]) <NEW_LINE> floor = self.db_building["dxf"]["floors"][1] <NEW_LINE> DXFRoomIdsResolver.resolve_rooms_id( self.building, floor, "easyroom" ) <NEW_LINE> self.assertEqual(floor["rooms"]["R023"], self.final_rooms["R023"]) <NEW_LINE> self.assertTrue("R022" not in floor["rooms"]) <NEW_LINE> self.assertTrue("R002" not in floor["rooms"])
Test r_id resolving with easyroom data, one floor at a time
625941b8596a897236089928
def read_description(): <NEW_LINE> <INDENT> return read("README.md")
Pip package description
625941b8aad79263cf390899
def train(self, batch_size, train_data_fn, validate_data_fn): <NEW_LINE> <INDENT> filename_queue = tf.train.string_input_producer([train_data_fn]) <NEW_LINE> reader = tf.TextLineReader(skip_header_lines=0) <NEW_LINE> key, value = reader.read(filename_queue) <NEW_LINE> decoded = tf.decode_csv( value, field_delim=' ', record_defaults=[[0] for i in range(self.max_seq_length * 2)]) <NEW_LINE> shuffle_batches = tf.train.shuffle_batch(decoded, batch_size=batch_size, capacity=batch_size * 50, min_after_dequeue=batch_size) <NEW_LINE> features = tf.transpose(tf.stack(shuffle_batches[0:self.max_seq_length])) <NEW_LINE> labels = tf.transpose(tf.stack(shuffle_batches[self.max_seq_length:])) <NEW_LINE> loss = self.loss(features, labels) <NEW_LINE> tvars = tf.trainable_variables() <NEW_LINE> grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), self.gradients_clip) <NEW_LINE> optimizer = tf.train.AdamOptimizer(self.learning_rate) <NEW_LINE> train_op = optimizer.apply_gradients(zip(grads, tvars)) <NEW_LINE> test_input_x, test_input_y = self.load_data(validate_data_fn) <NEW_LINE> test_logits, test_real_length = self.inference(self.test_input, True) <NEW_LINE> sv = tf.train.Supervisor(logdir=self.log_dir) <NEW_LINE> with sv.managed_session(master="") as sess: <NEW_LINE> <INDENT> best_accuracy = 0.0 <NEW_LINE> for step in range(self.max_train_steps): <NEW_LINE> <INDENT> if sv.should_stop(): <NEW_LINE> <INDENT> break; <NEW_LINE> <DEDENT> keep_rate = 0.5 <NEW_LINE> start = time.time() <NEW_LINE> loss_val, transition_params_val, _ = sess.run( [loss, self.transition_params, train_op], {self.keep_rate : keep_rate}) <NEW_LINE> end = time.time() <NEW_LINE> if (step + 1) % 10 == 0: <NEW_LINE> <INDENT> print("loss {:.4f} at step {}, time {:.4f}".format(loss_val, step, end-start)) <NEW_LINE> <DEDENT> if (step + 1) % 1000 == 0 or step == 0: <NEW_LINE> <INDENT> logits, test_real_length_val = sess.run( [test_logits, test_real_length], {self.test_input: test_input_x, self.keep_rate: 1.0}) <NEW_LINE> accuracy = self.calculate_accuracy(logits, test_input_y, test_real_length_val, transition_params_val) <NEW_LINE> print("accuracy {:.4f} at step {}".format(accuracy, step)) <NEW_LINE> if accuracy > best_accuracy: <NEW_LINE> <INDENT> best_accuracy = accuracy <NEW_LINE> sv.saver.save(sess, self.log_dir + "/best_model") <NEW_LINE> print("best accuracy model") <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> sv.saver.save(sess, self.log_dir + "/finnal_model")
Create the train op to train the graph on the data Args: batch_size: batch size train_data_fn: the training data file name validate_data_fn: the validation data file name
625941b8167d2b6e312189fc
def is_owner(username, activity): <NEW_LINE> <INDENT> if not username: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> return username == activity.username
Returns true if the given username is the owner of the given activity.
625941b8ac7a0e7691ed3f38
def reset(self): <NEW_LINE> <INDENT> self.generate_account_num_lineEdit.clear() <NEW_LINE> self.generate_account_num_lineEdit.setEnabled(True) <NEW_LINE> self.account_type_comboBox.setCurrentIndex(0) <NEW_LINE> self.account_holder_name_lineEdit.clear() <NEW_LINE> self.date_of_birth_lineEdit.clear() <NEW_LINE> self.mobile_num_lineEdit.clear() <NEW_LINE> self.address_lineEdit.clear() <NEW_LINE> self.aadhar_num_lineEdit.clear() <NEW_LINE> self.male_radioButton.setChecked(False) <NEW_LINE> self.female_radioButton.setChecked(False) <NEW_LINE> self.other_radioButton.setChecked(False)
reset method resets all the lineEdits in the Digital_bank main window :return:
625941b84527f215b584c2b9
def define_graph_topology(n_nodes, graph_topology, **args): <NEW_LINE> <INDENT> if graph_topology == 'ring': <NEW_LINE> <INDENT> graph = RingGraph(n_nodes) <NEW_LINE> <DEDENT> elif graph_topology == 'chain': <NEW_LINE> <INDENT> graph = ChainGraph(n_nodes) <NEW_LINE> <DEDENT> elif graph_topology == 'complete': <NEW_LINE> <INDENT> graph = CompleteGraph(n_nodes) <NEW_LINE> <DEDENT> elif graph_topology == 'time_varying_complete': <NEW_LINE> <INDENT> graph = TimeVaryingCompleteGraph(n_nodes, args['network_stability']) <NEW_LINE> <DEDENT> elif graph_topology == 'non_uniform_weight_ring_graph': <NEW_LINE> <INDENT> local_weight = args['local_weight'] <NEW_LINE> graph = NonUniformWeightRingGraph(n_nodes, local_weight) <NEW_LINE> <DEDENT> elif graph_topology == 'connected_cycle': <NEW_LINE> <INDENT> n_connectivity = args['n_connectivity'] <NEW_LINE> graph = NConnectedCycleGraph(n_nodes, n_connectivity, ) <NEW_LINE> <DEDENT> elif graph_topology == 'grid': <NEW_LINE> <INDENT> edge_length = int(np.sqrt(n_nodes)) <NEW_LINE> assert edge_length ** 2 == n_nodes <NEW_LINE> graph = TwoDimGridGraph(edge_length) <NEW_LINE> <DEDENT> return graph
Return the required graph object. Parameters ---------- n_nodes : {int} Number of nodes in the network. graph_topology : {str} A string describing the graph topology Returns ------- Graph A graph with specified information.
625941b8d53ae8145f87a0d4
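Illustrative calls to the factory above, using topology names taken from the branches in the function body:

graph = define_graph_topology(8, 'ring')
graph = define_graph_topology(16, 'grid')   # 16 nodes -> 4x4 grid
graph = define_graph_topology(8, 'time_varying_complete', network_stability=0.9)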
def format_record(element, band_column=None, band_type='int'): <NEW_LINE> <INDENT> import json <NEW_LINE> props, geom = element <NEW_LINE> cast = eval(band_type) <NEW_LINE> if band_column and band_type: <NEW_LINE> <INDENT> return { band_column: cast(props), 'geom': json.dumps(geom) } <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return { **props, 'geom': json.dumps(geom) }
Format the tuple received from the geobeam file source into a record that can be inserted into BigQuery. If using a raster source, the bands and band_column will be combined. Args: band_column (str, optional): the name of the raster band column band_type (str, optional): Default to int. The data type of the raster band column to store in the database. Example: .. code-block:: python # vector p | beam.Map(geobeam.fn.format_record) # raster p | beam.Map(geobeam.fn.format_record, band_column='elev', band_type=float)
625941b81f5feb6acb0c49b3
def __init__( self, client_name: str = "unknown", testing=False, host=None, port=None, protocol="http", ) -> None: <NEW_LINE> <INDENT> self.testing = testing <NEW_LINE> self.client_name = client_name <NEW_LINE> self.client_hostname = socket.gethostname() <NEW_LINE> _config = load_config() <NEW_LINE> server_config = _config["server" if not testing else "server-testing"] <NEW_LINE> client_config = _config["client" if not testing else "client-testing"] <NEW_LINE> server_host = host or server_config["hostname"] <NEW_LINE> server_port = port or server_config["port"] <NEW_LINE> self.server_address = "{protocol}://{host}:{port}".format( protocol=protocol, host=server_host, port=server_port ) <NEW_LINE> self.instance = SingleInstance( f"{self.client_name}-at-{server_host}-on-{server_port}" ) <NEW_LINE> self.commit_interval = client_config["commit_interval"] <NEW_LINE> self.request_queue = RequestQueue(self) <NEW_LINE> self.last_heartbeat = {}
A handy wrapper around the aw-server REST API. The recommended way of interacting with the server. Can be used with a `with`-statement as an alternative to manually calling connect and disconnect in a try-finally clause. :Example: .. literalinclude:: examples/client.py :lines: 7-
625941b815fb5d323cde0968
def map_saver(map, begin, text): <NEW_LINE> <INDENT> custom_map1 = CustomMap(map, begin) <NEW_LINE> file_name = text + ".txt" <NEW_LINE> try: <NEW_LINE> <INDENT> with open(file_name, 'wb') as fid: <NEW_LINE> <INDENT> pickle.dump(custom_map1, fid) <NEW_LINE> <DEDENT> print("Saved map") <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> print("Failed to save map")
Description: This saves the custom map to a file for later use Arguments: map (list): List of all map nodes begin (tuple): contains the x y coordinates of the beginning text (string): Name of map Returns:
625941b86aa9bd52df036c01
def __init__( self, *, category: Optional[str] = None, endpoints: Optional[List["EndpointDependency"]] = None, **kwargs ): <NEW_LINE> <INDENT> super(OutboundNetworkDependenciesEndpoint, self).__init__(**kwargs) <NEW_LINE> self.etag = None <NEW_LINE> self.category = category <NEW_LINE> self.endpoints = endpoints <NEW_LINE> self.provisioning_state = None
:keyword category: The type of service accessed by the Kusto Service Environment, e.g., Azure Storage, Azure SQL Database, and Azure Active Directory. :paramtype category: str :keyword endpoints: The endpoints that the Kusto Service Environment reaches the service at. :paramtype endpoints: list[~kusto_management_client.models.EndpointDependency]
625941b866673b3332b91ef5
def compose(self): <NEW_LINE> <INDENT> numbers = (self['low'].value, self['high'].value) <NEW_LINE> display = ' to '.join(str(n) for n in numbers) <NEW_LINE> return display, numbers
Emits a tuple of low and high integers.
625941b8851cf427c661a379
def get_email_subject(self, result): <NEW_LINE> <INDENT> return "Import of your data finished at %s" % self.date_ended
This method defines the subject of the email sent by openerp at the end of import @param result: a list of tuples (table_name, number_of_records_created/updated, warning) for each table @return the subject of the mail
625941b85fdd1c0f98dc0090
def setUp(self): <NEW_LINE> <INDENT> self.EmptyStr = '' <NEW_LINE> self.NoPairsStr = '.....' <NEW_LINE> self.OneHelixStr = '((((()))))' <NEW_LINE> self.ManyHelicesStr = '(..(((...)).((.(((((..))).)))..((((..))))))...)' <NEW_LINE> self.EndsStr = '..(.)..' <NEW_LINE> self.FirstEndStr = '..((()))' <NEW_LINE> self.LastEndStr = '((..((.))))...' <NEW_LINE> self.InternalStr = '(((...)))..((.)).' <NEW_LINE> self.EddyStr = '..((((.(((...)))...((.((....))..)).)).))' <NEW_LINE> for s in self.__dict__.keys(): <NEW_LINE> <INDENT> if s.endswith('Str'): <NEW_LINE> <INDENT> self.__dict__[s[:-3]] = ViennaStructure(self.__dict__[s]).toTree()
Instantiate some standard ViennaNodes.
625941b8cb5e8a47e48b790d
def get_paradox_cmd (ontology): <NEW_LINE> <INDENT> args = [] <NEW_LINE> args.append(filemgt.read_config('paradox','command')) <NEW_LINE> option = filemgt.read_config('paradox','options') <NEW_LINE> if option is not None: <NEW_LINE> <INDENT> args.append(option) <NEW_LINE> <DEDENT> args.append('--time') <NEW_LINE> args.append(filemgt.read_config('paradox','timeout')) <NEW_LINE> args.append('--verbose') <NEW_LINE> args.append('2') <NEW_LINE> args.append('--model') <NEW_LINE> args.append('--tstp') <NEW_LINE> args.append(ontology.write_tptp_file()) <NEW_LINE> return args
We only care about the first element in the list of imports, which we will use as the base name to obtain a single tptp file of the imports, which is the input for paradox.
625941b838b623060ff0ac4d
def generate_compute_configuration(self, uuid, containers, service, args, storage_info): <NEW_LINE> <INDENT> container_info = [] <NEW_LINE> for c in containers: <NEW_LINE> <INDENT> s = {'data_dev':'eth0', 'data_ip':c.internal_ip, 'manage_ip':c.external_ip, 'host_name':c.host_name, 'type':c.service_type} <NEW_LINE> s['container'] = c <NEW_LINE> s['storage'] = storage_info <NEW_LINE> s['args'] = args <NEW_LINE> container_info.append(s) <NEW_LINE> <DEDENT> return self._generate_configuration(uuid, container_info, service)
Generate a compute-specific configuration. This configuration lives in its own directory that gets copied in each container.
625941b892d797404e303fe9
def _read_from_file(self, stream, format='json'): <NEW_LINE> <INDENT> if format.lower() == 'json': <NEW_LINE> <INDENT> load = sjson.load <NEW_LINE> <DEDENT> elif format.lower() == 'yaml': <NEW_LINE> <INDENT> load = syaml.load <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError("Invalid database format: %s" % format) <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> if isinstance(stream, string_types): <NEW_LINE> <INDENT> with open(stream, 'r') as f: <NEW_LINE> <INDENT> fdata = load(f) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> fdata = load(stream) <NEW_LINE> <DEDENT> <DEDENT> except MarkedYAMLError as e: <NEW_LINE> <INDENT> raise syaml.SpackYAMLError("error parsing YAML database:", str(e)) <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> raise CorruptDatabaseError("error parsing database:", str(e)) <NEW_LINE> <DEDENT> if fdata is None: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> def check(cond, msg): <NEW_LINE> <INDENT> if not cond: <NEW_LINE> <INDENT> raise CorruptDatabaseError( "Spack database is corrupt: %s" % msg, self._index_path) <NEW_LINE> <DEDENT> <DEDENT> check('database' in fdata, "No 'database' attribute in YAML.") <NEW_LINE> db = fdata['database'] <NEW_LINE> check('installs' in db, "No 'installs' in YAML DB.") <NEW_LINE> check('version' in db, "No 'version' in YAML DB.") <NEW_LINE> installs = db['installs'] <NEW_LINE> version = Version(db['version']) <NEW_LINE> if version > _db_version: <NEW_LINE> <INDENT> raise InvalidDatabaseVersionError(_db_version, version) <NEW_LINE> <DEDENT> elif version < _db_version: <NEW_LINE> <INDENT> self.reindex(spack.store.layout) <NEW_LINE> installs = dict((k, v.to_dict()) for k, v in self._data.items()) <NEW_LINE> <DEDENT> def invalid_record(hash_key, error): <NEW_LINE> <INDENT> msg = ("Invalid record in Spack database: " "hash: %s, cause: %s: %s") <NEW_LINE> msg %= (hash_key, type(error).__name__, str(error)) <NEW_LINE> raise CorruptDatabaseError(msg, self._index_path) <NEW_LINE> <DEDENT> data = {} <NEW_LINE> for hash_key, rec in installs.items(): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> spec = self._read_spec_from_dict(hash_key, installs) <NEW_LINE> data[hash_key] = InstallRecord.from_dict(spec, rec) <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> invalid_record(hash_key, e) <NEW_LINE> <DEDENT> <DEDENT> for hash_key in data: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> self._assign_dependencies(hash_key, installs, data) <NEW_LINE> <DEDENT> except MissingDependenciesError: <NEW_LINE> <INDENT> raise <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> invalid_record(hash_key, e) <NEW_LINE> <DEDENT> <DEDENT> for hash_key, rec in data.items(): <NEW_LINE> <INDENT> rec.spec._mark_concrete() <NEW_LINE> <DEDENT> self._data = data
Fill database from file, do not maintain old data Translate the spec portions from node-dict form to spec form Does not do any locking.
625941b83d592f4c4ed1cede
def authorize_token(self, token, user): <NEW_LINE> <INDENT> return self.data_store.authorize_request_token(token, user)
Authorize a request token.
625941b8f9cc0f698b140464
@attr("functional") <NEW_LINE> @pytest.mark.skipif(os.getenv("TRAVIS", "false")=="true", reason="Test fails for some reason on Travis-CI") <NEW_LINE> @pytest.mark.skipif(os.getenv("CI", "false")=="true", reason="Test fails for some reason on GH Actions") <NEW_LINE> def test_not_overwritting_unicode_filename(): <NEW_LINE> <INDENT> input_files = [ 'The Big Bang Theory - S02E07.avi', 'The Big Bang Theory - [02x07] - The Panty Pin\u0303ata Polarization.avi'] <NEW_LINE> expected_files = [ 'The Big Bang Theory - S02E07.avi', 'The Big Bang Theory - [02x07] - The Panty Pin\u0303ata Polarization.avi'] <NEW_LINE> out_data = run_tvnamer( with_files = input_files, with_flags = ['--batch']) <NEW_LINE> verify_out_data(out_data, expected_files)
Test no error occurs when warning about a unicode filename being overwritten
625941b8ec188e330fd5a605
def check_keydown_events(event, ai_settings, screen, ship, bullets): <NEW_LINE> <INDENT> if event.key == pygame.K_RIGHT: <NEW_LINE> <INDENT> ship.moving_right = True <NEW_LINE> <DEDENT> elif event.key == pygame.K_SPACE: <NEW_LINE> <INDENT> fire_bullet(ai_settings, screen, ship, bullets) <NEW_LINE> <DEDENT> elif event.key == pygame.K_LEFT: <NEW_LINE> <INDENT> ship.moving_left = True <NEW_LINE> <DEDENT> elif event.key == pygame.K_q: <NEW_LINE> <INDENT> sys.exit()
Respond to keydown events.
625941b807f4c71912b112e5
def tearDown(self): <NEW_LINE> <INDENT> self.clean_up_path(self.expected_paths) <NEW_LINE> super(TestExtractWorker, self).tearDown() <NEW_LINE> self.worker.channel.queue_delete( queue=self.params['PROXY_PUBLISH']['queue'] ) <NEW_LINE> self.worker.channel.exchange_delete( exchange=self.params['PROXY_PUBLISH']['exchange'] )
Generic tear down of the class. It removes the files and paths created in the test. It then calls the super class's tear down afterwards. It finally deletes the queues and exchanges that were created for the test. :return: no return
625941b83346ee7daa2b2bc8
def __getitem__(self, coords): <NEW_LINE> <INDENT> if coords is None: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> x, y = coords <NEW_LINE> try: <NEW_LINE> <INDENT> return self._board[(x, y)] <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> return None
Return the square at the given coordinates. **PARAMETERS** * *coords* - tuple of coordinates. Use a third optional element to indicate horizontal direction (among 'left', 'right'). Use a forth optional element to indicate if you are currently on the circle.
625941b8e64d504609d7469f
def merge_sort(a): <NEW_LINE> <INDENT> length = len(a) <NEW_LINE> if length == 1: <NEW_LINE> <INDENT> return a, 0 <NEW_LINE> <DEDENT> mid = length // 2 <NEW_LINE> left_halve, left_halve_inversions = merge_sort(a[0:mid]) <NEW_LINE> right_halve, right_halve_inversions = merge_sort(a[mid:]) <NEW_LINE> merged, merged_inversions = merge(left_halve, right_halve) <NEW_LINE> all_inversions = merged_inversions + left_halve_inversions + right_halve_inversions <NEW_LINE> return merged, all_inversions
Merge sort algorithm for sorting a list and counting the inversions needed to sort it Returns: sorted list, number of inversions
625941b87d43ff24873a2b02
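merge_sort above relies on a merge helper that is not shown; a plausible sketch (an assumption, not the original code) that merges two sorted lists and counts the split inversions:

def merge(left, right):
    merged = []
    inversions = 0
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            # every element still waiting in `left` is inverted with right[j]
            merged.append(right[j])
            j += 1
            inversions += len(left) - i
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged, inversions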
def get_summary(self, page): <NEW_LINE> <INDENT> url = f"https://fr.wikipedia.org/api/rest_v1/page/summary/{page}" <NEW_LINE> if page: <NEW_LINE> <INDENT> r = requests.get(url) <NEW_LINE> summary = r.json()['extract'] <NEW_LINE> return summary <NEW_LINE> <DEDENT> return None
Get the summary from the page chosen
625941b832920d7e50b2802b
def load_and_preprocess_image(x, label): <NEW_LINE> <INDENT> image = tf.io.read_file(x) <NEW_LINE> image = tf.image.decode_jpeg(image, channels=3) <NEW_LINE> image = tf.cast(image, dtype='float32') <NEW_LINE> image = tf.math.divide(image, tf.constant(255, dtype=tf.float32)) <NEW_LINE> original = tf.image.central_crop(image, central_fraction=0.5) <NEW_LINE> augmented = tf.image.rot90(image, k=tf.random.uniform(shape=[], minval=0, maxval=4, dtype=tf.int32, seed=42)) <NEW_LINE> augmented = tf.image.random_crop(augmented, size=[128, 128, 3]) <NEW_LINE> return (original, label), (augmented, label)
Load image and return a (cropped) original and augmented version.
625941b80383005118ecf443
def dataInJson(self): <NEW_LINE> <INDENT> return self.__df.to_json()
data in json format
625941b8aad79263cf39089a
def _get_starting_letter(self): <NEW_LINE> <INDENT> random_number = random.uniform(0, 1) <NEW_LINE> starting_letter = sorted({k:v for (k, v) in self.pair_pr_table.frequencies.items() if k[0] == ' ' and k[1] != ' ' and v >= random_number}.keys())[0][1] <NEW_LINE> return starting_letter
Return the starting letter for a new random name, or alternatively a new random letter when we do not have any information from previous letters. (i.e. missing pairs in sample data)
625941b84c3428357757c189
def align_between_bands(filename): <NEW_LINE> <INDENT> out = subprocess.check_output(['psrstat', '-c', 'nbin', filename]) <NEW_LINE> out2 = out.split('=')[1] <NEW_LINE> nbins = out2.split('\n')[0] <NEW_LINE> filename_nodir = filename.split('/')[1] <NEW_LINE> psr = filename_nodir.split('_')[0] <NEW_LINE> direc = '/nimrod1/GBT/Ter5/GUPPI/PSRs/templates/' <NEW_LINE> template = direc + '%s_%s_gaussians.template' %(psr, nbins) <NEW_LINE> out = subprocess.check_output(['pat', '-R', '-TF', '-s', template, filename]) <NEW_LINE> rotateby = out.split(' ')[3] <NEW_LINE> command = "pam -r %s -e autorot -u ./output %s" %(rotateby, filename) <NEW_LINE> print(command) <NEW_LINE> os.system(command)
Line up between bands
625941b838b623060ff0ac4e
def set_description(self, value): <NEW_LINE> <INDENT> self._raise_error_modifying_statistic_with_perfect_match() <NEW_LINE> if value != self._description: <NEW_LINE> <INDENT> self._modified = True <NEW_LINE> self._description = value
Set the description of the statistic. INPUT: - a string -- the name of the statistic followed by its description on a separate line. OUTPUT: - Raise an error, if the query has a match with no intermediate combinatorial maps. This information is used when submitting the statistic with :meth:`submit`. EXAMPLES:: sage: s = findstat([(d, randint(1,1000)) for d in DyckWords(4)]); s # optional -- internet a new statistic on Cc0005: Dyck paths sage: s.set_description("Random values on Dyck paths.\r\nNot for submission.") # optional -- internet sage: s # optional -- internet a new statistic on Cc0005: Dyck paths sage: s.name() # optional -- internet 'Random values on Dyck paths.' sage: print(s.description()) # optional -- internet Random values on Dyck paths. Not for submission.
625941b87c178a314d6ef2b7
def build(self, topology): <NEW_LINE> <INDENT> self.motor_ensembles = {out['ensemble'] : topology['ensembles'][out['ensemble']]['n'] for out in topology['outputs'].values()} <NEW_LINE> self.decoding_config = topology['decoding'].copy() <NEW_LINE> self.act_ens_map = {out_name : out['ensemble'] for out_name, out in topology['outputs'].items()} <NEW_LINE> for decoder_name, decoder in topology['decoding'].items(): <NEW_LINE> <INDENT> self._decoders.update({decoder_name : decoders[decoder['scheme']]({ self.act_ens_map[decoder_name] : self.motor_ensembles[self.act_ens_map[decoder_name]]}, **decoder['params'])})
Builds the decoders using the topology config. dict.
625941b8cdde0d52a9e52e8e
def clone_config(self, csid, name, descrip): <NEW_LINE> <INDENT> self.curs.execute("SELECT family FROM config_set WHERE rowid = ?", (csid,)) <NEW_LINE> family = self.curs.fetchone() <NEW_LINE> if not family: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> self.curs.execute("SELECT COUNT(*) FROM config_set WHERE name = ? AND family = ?", (name,family[0])) <NEW_LINE> if self.curs.fetchone()[0]: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> cvals = self.get_config(csid) <NEW_LINE> newid = self.make_configset(name, family[0], descrip) <NEW_LINE> for (k,v) in cvals.items(): <NEW_LINE> <INDENT> self.set_config_value(newid, k, v) <NEW_LINE> <DEDENT> return newid
Clone configuration to new name and description
625941b86fece00bbac2d59a
def _build_distributed_network(model, strategy, inputs=None, targets=None, mode=None): <NEW_LINE> <INDENT> with K.get_graph().as_default(), strategy.scope(): <NEW_LINE> <INDENT> distributed_model = strategy.extended.call_for_each_replica( _build_network_on_replica, args=(model, inputs, targets, mode)) <NEW_LINE> if mode is _Mode.TRAIN: <NEW_LINE> <INDENT> model._distributed_model_train = distributed_model <NEW_LINE> <DEDENT> elif mode is _Mode.TEST: <NEW_LINE> <INDENT> model._distributed_model_test = distributed_model <NEW_LINE> <DEDENT> elif mode is _Mode.PREDICT: <NEW_LINE> <INDENT> model._distributed_model_predict = distributed_model <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> model._distributed_model = distributed_model
Create a cloned model on each replica.
625941b8c432627299f04aa3
def _set_error_handler_callbacks(self, app): <NEW_LINE> <INDENT> @app.errorhandler(NoAuthorizationError) <NEW_LINE> def handle_no_auth_error(e): <NEW_LINE> <INDENT> return self._unauthorized_callback(str(e)) <NEW_LINE> <DEDENT> @app.errorhandler(InvalidHeaderError) <NEW_LINE> def handle_invalid_header_error(e): <NEW_LINE> <INDENT> return self._invalid_token_callback(str(e)) <NEW_LINE> <DEDENT> @app.errorhandler(jwt.ExpiredSignatureError) <NEW_LINE> def handle_expired_error(e): <NEW_LINE> <INDENT> return self._expired_token_callback() <NEW_LINE> <DEDENT> @app.errorhandler(jwt.InvalidTokenError) <NEW_LINE> def handle_invalid_token_error(e): <NEW_LINE> <INDENT> return self._invalid_token_callback(str(e))
Sets the error handler callbacks used by this extension
625941b816aa5153ce3622d7
def to_bytes(self) -> bytes: <NEW_LINE> <INDENT> current_hash = self.current_tx_hash <NEW_LINE> if current_hash is None: <NEW_LINE> <INDENT> current_hash = bytes(DEFAULT_BYTE_SIZE) <NEW_LINE> <DEDENT> converted_current_hash = current_hash <NEW_LINE> next_hash = self.next_tx_hash <NEW_LINE> if next_hash is None: <NEW_LINE> <INDENT> next_hash = bytes(DEFAULT_BYTE_SIZE) <NEW_LINE> <DEDENT> converted_next_hash = next_hash <NEW_LINE> bytes_var = pack(self._STRUCT_FMT, self._VERSION, self.deploy_state.value, self._score_address.to_bytes(), self.owner.to_bytes(), converted_current_hash, converted_next_hash) <NEW_LINE> return bytes_var
Convert IconScoreDeployInfo object to bytes :return: data including information of IconScoreDeployInfo object
625941b8091ae35668666dc4
def BEQ(cpu, source): <NEW_LINE> <INDENT> offset = source() <NEW_LINE> extra_cycle = 0 <NEW_LINE> if cpu.get_status('zero') is True: <NEW_LINE> <INDENT> extra_cycle = 1 <NEW_LINE> pc = cpu.registers['pc'].read() <NEW_LINE> if (pc & 0xff00) != (pc + offset & 0xff00): <NEW_LINE> <INDENT> extra_cycle += 1 <NEW_LINE> <DEDENT> cpu.registers['pc'].increment(value=offset) <NEW_LINE> <DEDENT> return None, extra_cycle
Branch if result was zero
625941b8fff4ab517eb2f298
@login_required <NEW_LINE> @permission_required('accounts.view_member', raise_exception=True) <NEW_LINE> def shame(request): <NEW_LINE> <INDENT> context = {} <NEW_LINE> worst_cc_report_forgetters = get_user_model().objects.annotate(Count('ccinstances', distinct=True)).annotate( did_ccreport_count=Count(Case(When(ccinstances__event__ccreport__crew_chief=F('pk'), then=F('ccinstances'))), distinct=True)).annotate( failed_to_do_ccreport_count=(F('ccinstances__count') - F('did_ccreport_count'))).annotate( failed_to_do_ccreport_percent=(F('failed_to_do_ccreport_count') * 100 / F('ccinstances__count'))).order_by( '-failed_to_do_ccreport_count', '-failed_to_do_ccreport_percent')[:10] <NEW_LINE> context['worst_cc_report_forgetters'] = worst_cc_report_forgetters <NEW_LINE> return render(request, 'users_shame.html', context)
The LNL Crew Chief Report Hall of Shame. Tracks members who fail to complete event reports and lists the top 10 on a leaderboard.
625941b88c3a87329515821d
def calculate_and_log_all_metrics_train( self, curr_iter, timer, suffix='', tb_writer=None): <NEW_LINE> <INDENT> self.lr = float( workspace.FetchBlob('gpu_{}/lr'.format(cfg.ROOT_GPU_ID))) <NEW_LINE> cur_batch_size = get_batch_size_from_workspace() <NEW_LINE> cur_loss = sum_multi_gpu_blob('loss') <NEW_LINE> cur_loss = float(np.sum(cur_loss)) <NEW_LINE> self.aggr_loss += cur_loss * cur_batch_size <NEW_LINE> self.aggr_batch_size += cur_batch_size <NEW_LINE> if not cfg.MODEL.MULTI_LABEL: <NEW_LINE> <INDENT> accuracy_metrics = compute_multi_gpu_topk_accuracy( top_k=1, split=self.split, suffix=suffix) <NEW_LINE> accuracy5_metrics = compute_multi_gpu_topk_accuracy( top_k=5, split=self.split, suffix=suffix) <NEW_LINE> cur_err = (1.0 - accuracy_metrics['topk_accuracy']) * 100 <NEW_LINE> cur_err5 = (1.0 - accuracy5_metrics['topk_accuracy']) * 100 <NEW_LINE> self.aggr_err += cur_err * cur_batch_size <NEW_LINE> self.aggr_err5 += cur_err5 * cur_batch_size <NEW_LINE> <DEDENT> if (curr_iter + 1) % cfg.LOG_PERIOD == 0: <NEW_LINE> <INDENT> rem_iters = cfg.SOLVER.MAX_ITER - curr_iter - 1 <NEW_LINE> eta_seconds = timer.average_time * rem_iters <NEW_LINE> eta = str(datetime.timedelta(seconds=int(eta_seconds))) <NEW_LINE> epoch = (curr_iter + 1) / (cfg.TRAIN.DATASET_SIZE / cfg.TRAIN.BATCH_SIZE) <NEW_LINE> log_str = ' '.join(( '| Train ETA: {} LR: {:.8f}', ' Iters [{}/{}]', '[{:.2f}ep]', ' Time {:0.3f}', ' Loss {:7.4f}', )).format( eta, self.lr, curr_iter + 1, cfg.SOLVER.MAX_ITER, epoch, timer.diff, cur_loss, ) <NEW_LINE> if not cfg.MODEL.MULTI_LABEL: <NEW_LINE> <INDENT> log_str += ' top1 {:7.3f} top5 {:7.3f}'.format( cur_err, cur_err5) <NEW_LINE> <DEDENT> print(log_str) <NEW_LINE> if tb_writer is not None: <NEW_LINE> <INDENT> tb_writer.add_scalar('Train/Loss', cur_loss, curr_iter+1) <NEW_LINE> tb_writer.add_scalar('Train/LR', self.lr, curr_iter+1)
Calculate and log metrics for training.
625941b85fcc89381b1e1523
def __init__(self, x, y, z, w): <NEW_LINE> <INDENT> super().__init__(x, y, z) <NEW_LINE> self._w = w
Construct a QuaternionPacket from the given x, y, z, and w float values.
625941b84c3428357757c18a
def primes(upto): <NEW_LINE> <INDENT> primes = np.arange(3, upto + 1, 2) <NEW_LINE> isprime = np.ones(int((upto - 1) / 2), dtype=bool) <NEW_LINE> for factor in primes[:int(np.sqrt(upto))]: <NEW_LINE> <INDENT> if isprime[int((factor - 2) / 2)]: <NEW_LINE> <INDENT> isprime[int((factor * 3 - 2) / 2)::factor] = 0 <NEW_LINE> <DEDENT> <DEDENT> return np.insert(primes[isprime], 0, 2)
Prime sieve up to an <upto> value. Copied from http://rebrained.com/?p=458 Parameters ---------- upto : int Find all primes less than or equal to this limit.
625941b8d99f1b3c44c673f5
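A quick sanity check of the sieve above (the exact numpy repr may vary slightly):

>>> primes(30)
array([ 2,  3,  5,  7, 11, 13, 17, 19, 23, 29])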
def _generate_segments(self, origin_x_mm, origin_y_mm): <NEW_LINE> <INDENT> polygons = [] <NEW_LINE> last_point_index = len(self.points) - 1 <NEW_LINE> polygons.append(make_square(origin_x_mm, origin_y_mm, self.points[0], self.end_pad_size)) <NEW_LINE> polygons.append(make_square(origin_x_mm, origin_y_mm, self.points[last_point_index], self.end_pad_size)) <NEW_LINE> for i in range(0, len(self.points) - 1): <NEW_LINE> <INDENT> polygons.append(make_trace(origin_x_mm, origin_y_mm, self.points[i], self.points[i + 1], self.trace_size)) <NEW_LINE> <DEDENT> graph = utils.Graph() <NEW_LINE> for p in polygons: <NEW_LINE> <INDENT> graph.add_polygon(p) <NEW_LINE> <DEDENT> return graph.get_exterior_segments()
Takes the points and turns them into a series of segments that define the outline.
625941b84f6381625f1148a5
def setUp(self): <NEW_LINE> <INDENT> self.entries = [] <NEW_LINE> self.clock = task.Clock() <NEW_LINE> self.api = FakeTwitterAPI() <NEW_LINE> self.monitor = twitter.TwitterMonitor(self.api.filter, delegate=None, reactor=self.clock) <NEW_LINE> self.monitor.noisy = True <NEW_LINE> self.connects = None
Called at the beginning of each test. Set up a L{twitter.TwitterMonitor} with testable API, a clock to test delayed calls and make the test class the delegate.
625941b8d268445f265b4cd4
def __saveSpectrum(self): <NEW_LINE> <INDENT> my_name = '__saveSpectrum' <NEW_LINE> if not hasattr(self, "_Spectrum__spectrum") and self.__extract: <NEW_LINE> <INDENT> raise SpectrumNameError(my_name, "__spectrum is missing.") <NEW_LINE> <DEDENT> if not hasattr(self, "_Spectrum__wavelength") and self.__calibrated: <NEW_LINE> <INDENT> raise SpectrumNameError(my_name, "__wavelength is missing.") <NEW_LINE> <DEDENT> if not self.__extracted: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> f = open("{}_extracted.dat".format(self.__name), 'w') <NEW_LINE> if not self.__calibrated: <NEW_LINE> <INDENT> f.write("# spectrum\n") <NEW_LINE> [f.write("{}\n".format(s)) for s in self.__spectrum] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> f.write("# wavelength(Angs) spectrum\n") <NEW_LINE> [f.write("{} {}\n".format(w, s)) for w, s in zip(self.__wavelength, self.__spectrum)] <NEW_LINE> <DEDENT> f.close()
Save the extracted spectrum METHOD: Spectrum.__saveSpectrum TYPE: Private PURPOSE: Save the extracted spectrum. The name of the file is the same as the input file, but with the extension replaced by '_extracted.dat' appended to it. If __extracted is not set, then do nothing. If __calibrated is set, then also save the associated wavelength ARGUMENTS: NONE RETURNS: NONE RAISES: * SpectrumNameError when __spectrum is missing and __extracted is set. * SpectrumNameError when __wavelength is missing and __calibrated is set EXAMPLES: * self.__saveSpectrum()
625941b8627d3e7fe0d68cad
def main(csv_file, post_metadata): <NEW_LINE> <INDENT> with Timer() as total: <NEW_LINE> <INDENT> params = param_to_dict(post_metadata) <NEW_LINE> args = {'sep': params['separator'], 'semantic': params['semantic'], 'missing': params['missing'], 'datetime': params['datetime'] } <NEW_LINE> if 'header' in params: <NEW_LINE> <INDENT> args['first_col_header'] = params['header'] <NEW_LINE> <DEDENT> with Timer() as mtxtime: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> diff_mtx = DiffMatrix(csv_file, **args) <NEW_LINE> labels = diff_mtx.get_labels() <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> return {"error":str(e.__doc__)} <NEW_LINE> <DEDENT> <DEDENT> cols_count = ut.get_cols_count(csv_file, params['separator']) <NEW_LINE> hss = extract_hss(cols_count, params['lhs'], params['rhs']) <NEW_LINE> response = {'mtxtime': "{:.2f}".format(mtxtime.interval), 'result': {}, 'timing': []} <NEW_LINE> for combination in hss: <NEW_LINE> <INDENT> with Timer() as c: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> comb_dist_mtx = diff_mtx.split_sides(combination) <NEW_LINE> nd = RFDDiscovery(comb_dist_mtx) <NEW_LINE> r = nd.get_rfds(nd.standard_algorithm, combination) <NEW_LINE> rhs = r[[0]] <NEW_LINE> lhs = r.drop([r.columns[0]], axis=1) <NEW_LINE> result_df = pnd.concat([lhs, rhs], axis=1) <NEW_LINE> response['result'][json.dumps(name_combination(labels, combination))] = result_df.to_csv(sep=params['separator']) <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> return {"error": str(e.__doc__)} <NEW_LINE> <DEDENT> <DEDENT> response['timing'].append("{:.2f}".format(c.interval)) <NEW_LINE> <DEDENT> <DEDENT> response['total'] = "{:.2f}".format(total.interval) <NEW_LINE> return response
Given a valid CSV file's path and a series of parameters given by the user via the web gui, execute the algorithm on the given input. If some of the parameters are not valid, the method returns an error string message. It returns the output of the standard_algorithm in a dict where each element is the output of the algorithm for each combination given as input and with the combination itself as a key in JSON format. :param csv_file: valid path to a CSV file :type csv_file: str :param post_metadata: dict containing the user's parameters :type post_metadata: werkzeug.datastructures.ImmutableMultiDict :return: a dict containing the output of each combination or with an error message :rtype: dict
625941b8379a373c97cfa9a9
def bitop(self, operation, dest, *keys, **kwargs): <NEW_LINE> <INDENT> kwargs = {'callback': kwargs.get('callback', None)} <NEW_LINE> self._execute_command('BITOP', operation, dest, *keys, **kwargs)
Perform a bitwise operation using ``operation`` between ``keys`` and store the result in ``dest``.
625941b80c0af96317bb8048
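A hypothetical call site for the method above; `client` is assumed to be an instance of the class, `key1`/`key2` are assumed to already hold bitmap values, and `on_done` is only an illustrative callback name:

def on_done(result):
    # for BITOP, Redis replies with the length of the string stored at dest
    print('dest is', result, 'bytes long')

client.bitop('AND', 'dest', 'key1', 'key2', callback=on_done)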
def iter_customsql_exclude_apps(self, appnames): <NEW_LINE> <INDENT> for registered_appname in self.iter_appnames(): <NEW_LINE> <INDENT> if registered_appname not in appnames: <NEW_LINE> <INDENT> for customsql_class in self._customsql_classes_by_appname_map[registered_appname]: <NEW_LINE> <INDENT> yield customsql_class(registered_appname)
Iterate over all :class:`.AbstractCustomSql` subclasses registered except for the list of given appnames. The yielded values are objects of the classes initialized with no arguments.
625941b821a7993f00bc7b49
def clear_file_metadata(self) -> None: <NEW_LINE> <INDENT> self.packets = None <NEW_LINE> self.time = None <NEW_LINE> self.size = None <NEW_LINE> self.name = None
Clear file metadata.
625941b8925a0f43d2549cd3
def transpose(self): <NEW_LINE> <INDENT> result = [[0] * len(self.matrix) for k in range(len(self.matrix[0]))] <NEW_LINE> for i in range(len(self.matrix)): <NEW_LINE> <INDENT> for j in range(len(self.matrix[0])): <NEW_LINE> <INDENT> result[j][i] = self.matrix[i][j] <NEW_LINE> <DEDENT> <DEDENT> return Matrix(result)
Transposes a matrix. :return: Matrix object.
625941b8442bda511e8be285
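A small usage sketch, assuming the Matrix constructor takes a list of rows as the method body suggests:

m = Matrix([[1, 2, 3],
            [4, 5, 6]])
t = m.transpose()
# t.matrix == [[1, 4], [2, 5], [3, 6]]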
@app.route("/recentlist", methods=['POST']) <NEW_LINE> def recentlist(): <NEW_LINE> <INDENT> user_id = request.form.get('user', None) <NEW_LINE> count = 14 <NEW_LINE> try: <NEW_LINE> <INDENT> count = int(request.form.get('count', str(count))) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> if count > 50: <NEW_LINE> <INDENT> count = 50 <NEW_LINE> <DEDENT> elif count < 1: <NEW_LINE> <INDENT> count = 1 <NEW_LINE> <DEDENT> if user_id is None: <NEW_LINE> <INDENT> user_id = User.current_id() <NEW_LINE> <DEDENT> return jsonify(result = Error.LEGAL, recentlist = _recentlist(user_id, max_len = count))
Return a list of recently completed games for the indicated user
625941b856ac1b37e626403f
def pltfm_mgr_qsfp_pwr_override_set(self, port_num, power_override, power_set): <NEW_LINE> <INDENT> pass
Parameters: - port_num - power_override - power_set
625941b8004d5f362079a196
def GetAssetFolderFromLocalPath(self,pLocalPath): <NEW_LINE> <INDENT> pass
Get a folder object using its local path. pLocalPath : Path to the folder on the local disk. return : An FBAssetFolder* object, or NULL if the folder was not found or no mapping could be done.
625941b8796e427e537b0422
def shear(img, shear_rad): <NEW_LINE> <INDENT> affine_tf = tf.AffineTransform(shear=shear_rad) <NEW_LINE> return tf.warp(img, inverse_map=affine_tf)
Shear a image to a given radian. @Author: Rigel @param img: image to be sheared @param shear_rad: radian to which the image will be sheared @return: a numpy.ndarray object of this image
625941b873bcbd0ca4b2bedd
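A usage sketch, assuming `tf` in the snippet above refers to `skimage.transform` (as the AffineTransform/warp calls suggest):

from skimage import data
import skimage.transform as tf

image = data.camera()          # sample grayscale image shipped with scikit-image
sheared = shear(image, 0.2)    # shear by 0.2 radians; warp returns a float image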
def test_check_metadata_normal_style(): <NEW_LINE> <INDENT> check = CheckTester(googlefonts_profile, "com.google.fonts/check/metadata/normal_style") <NEW_LINE> from fontbakery.constants import MacStyle <NEW_LINE> ttFont = TTFont(TEST_FILE("merriweather/Merriweather-Regular.ttf")) <NEW_LINE> assert_PASS(check(ttFont), 'with a good font...') <NEW_LINE> for i, name in enumerate(ttFont['name'].names): <NEW_LINE> <INDENT> if name.nameID == NameID.FONT_FAMILY_NAME: <NEW_LINE> <INDENT> backup = name.string <NEW_LINE> ttFont['name'].names[i].string = "Merriweather-Italic".encode(name.getEncoding()) <NEW_LINE> assert_results_contain(check(ttFont), FAIL, 'familyname-italic', 'with a non-italic font that has a "-Italic" in FONT_FAMILY_NAME...') <NEW_LINE> ttFont['name'].names[i].string = backup <NEW_LINE> <DEDENT> <DEDENT> for i, name in enumerate(ttFont['name'].names): <NEW_LINE> <INDENT> if name.nameID == NameID.FULL_FONT_NAME: <NEW_LINE> <INDENT> backup = name.string <NEW_LINE> ttFont['name'].names[i].string = "Merriweather-Italic".encode(name.getEncoding()) <NEW_LINE> assert_results_contain(check(ttFont), FAIL, 'fullfont-italic', 'with a non-italic font that has a "-Italic" in FULL_FONT_NAME...') <NEW_LINE> ttFont['name'].names[i].string = backup <NEW_LINE> <DEDENT> <DEDENT> ttFont['head'].macStyle |= MacStyle.ITALIC <NEW_LINE> assert_results_contain(check(ttFont), FAIL, 'bad-macstyle', 'with bad macstyle bit value...')
METADATA.pb font.style "normal" matches font internals ?
625941b823849d37ff7b2ef1
def ip_in(ip, model): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> for i in model.objects.all(): <NEW_LINE> <INDENT> if ip in i.network(): <NEW_LINE> <INDENT> return i.reason or "" <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> except ValueError: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> return False
Return True if the given ip address is in one of the ban models.
625941b863d6d428bbe4434f
def __init__(self): <NEW_LINE> <INDENT> self._queue = queue.Queue() <NEW_LINE> self._paused = False <NEW_LINE> thread = Thread(target=self._controller) <NEW_LINE> thread.daemon = True <NEW_LINE> thread.start()
Constructor.
625941b830c21e258bdfa2fd
def plot_confusion_matrix(data, labels, title, output_filename): <NEW_LINE> <INDENT> seaborn.set(color_codes=True) <NEW_LINE> plt.figure(1, figsize=(20, 6)) <NEW_LINE> plt.title(title) <NEW_LINE> seaborn.set(font_scale=1.4) <NEW_LINE> ax = seaborn.heatmap(data, annot=True, cmap="YlGnBu", cbar_kws={'label': 'Scale'}, fmt='g') <NEW_LINE> ax.set_xticklabels(labels) <NEW_LINE> ax.set_yticklabels(labels, rotation = 0) <NEW_LINE> ax.set(ylabel="True Label", xlabel="Predicted Label") <NEW_LINE> plt.savefig(output_filename, bbox_inches='tight', dpi=300) <NEW_LINE> plt.close()
Plot confusion matrix using heatmap. Args: data (list of list): List of lists with confusion matrix data. labels (list): Labels which will be plotted across x and y axis. output_filename (str): Path to output file.
625941b8507cdc57c6306b33
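A possible invocation of plot_confusion_matrix() above; the 2x2 counts and label names are made up for illustration, and seaborn/matplotlib must be installed.

data = [[50, 3],
        [5, 42]]
labels = ["negative", "positive"]
plot_confusion_matrix(data, labels,
                      title="Validation confusion matrix",
                      output_filename="confusion_matrix.png")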
def calc_spike(self, mod_t, exp_t, args): <NEW_LINE> <INDENT> add_data = args.get("add_data", None) <NEW_LINE> temp_fit = 0 <NEW_LINE> spikes = [[], []] <NEW_LINE> if (self.model.spike_times == None): <NEW_LINE> <INDENT> spikes[0] = self.detectSpike(mod_t) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> spikes[0] = self.model.spike_times <NEW_LINE> <DEDENT> if add_data != None: <NEW_LINE> <INDENT> spikes[1] = add_data <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> spikes[1] = self.detectSpike(exp_t) <NEW_LINE> <DEDENT> mod_spike = len(spikes[0]) <NEW_LINE> exp_spike = len(spikes[1]) <NEW_LINE> try: <NEW_LINE> <INDENT> temp_fit += float(abs(mod_spike - exp_spike)) / float(exp_spike + mod_spike + 1) <NEW_LINE> <DEDENT> except ZeroDivisionError: <NEW_LINE> <INDENT> temp_fit += 1 <NEW_LINE> <DEDENT> if self.option.output_level == "1": <NEW_LINE> <INDENT> print("spike count") <NEW_LINE> print("mod: ", mod_spike) <NEW_LINE> print("exp: ", exp_spike) <NEW_LINE> print(temp_fit) <NEW_LINE> <DEDENT> return temp_fit
Calculates the normalized absolute differences of the number of spikes in the traces. :param mod_t: the trace obtained from the model as ``list`` :param exp_t: the input trace as ``list`` :param args: optional arguments as ``dictionary`` :return: the normalized absolute differences of the number of spikes, where the normalization is done by the sum of the number of spikes in both traces plus one
625941b84d74a7450ccd4023
def test_lowercase(self): <NEW_LINE> <INDENT> output_file = "board_1_test_output.txt" <NEW_LINE> BoggleBoard(DICT_FILE, BOARD_1_FILE, output_file) <NEW_LINE> board_1_output = read_file_to_string(output_file) <NEW_LINE> self.assertEqual(board_1_output, board_1_output.lower())
Validate output answers are lowercase.
625941b897e22403b379cdf8
def test_read_zip(self): <NEW_LINE> <INDENT> with zipfile.ZipFile('../test-data/zip-test.zip') as z: <NEW_LINE> <INDENT> with z.open('zip-test.txt') as f: <NEW_LINE> <INDENT> for line in f: <NEW_LINE> <INDENT> print(line) <NEW_LINE> print(line.decode()) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> self.assertEqual(True, True)
Test reading a text file inside a zip archive. :return:
625941b8a8370b7717052700
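A small helper sketch for producing the fixture the test above expects; the archive path and member name come from the test itself, while the file contents are arbitrary.

import os
import zipfile

os.makedirs('../test-data', exist_ok=True)
with zipfile.ZipFile('../test-data/zip-test.zip', 'w') as z:
    z.writestr('zip-test.txt', 'first line\nsecond line\n')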
def as_ECL(self, epoch=None, ecl="IERS2010"): <NEW_LINE> <INDENT> m_ecl = AstrometryEcliptic() <NEW_LINE> m_ecl.PX = self.PX <NEW_LINE> m_ecl.POSEPOCH = self.POSEPOCH <NEW_LINE> c = self.coords_as_ECL(epoch=epoch, ecl=ecl) <NEW_LINE> m_ecl.ELONG.quantity = c.lon <NEW_LINE> m_ecl.ELAT.quantity = c.lat <NEW_LINE> m_ecl.PMELONG.quantity = c.pm_lon_coslat <NEW_LINE> m_ecl.PMELAT.quantity = c.pm_lat <NEW_LINE> m_ecl.ECL.value = ecl <NEW_LINE> dt = 1 * u.yr <NEW_LINE> c = coords.SkyCoord( ra=self.RAJ.quantity, dec=self.DECJ.quantity, obstime=self.POSEPOCH.quantity, pm_ra_cosdec=self.RAJ.uncertainty * np.cos(self.DECJ.quantity) / dt, pm_dec=self.DECJ.uncertainty / dt, frame=coords.ICRS, ) <NEW_LINE> c_ECL = c.transform_to(PulsarEcliptic(ecl=ecl)) <NEW_LINE> m_ecl.ELONG.uncertainty = c_ECL.pm_lon_coslat * dt / np.cos(c_ECL.lat) <NEW_LINE> m_ecl.ELAT.uncertainty = c_ECL.pm_lat * dt <NEW_LINE> c = coords.SkyCoord( ra=self.RAJ.quantity, dec=self.DECJ.quantity, obstime=self.POSEPOCH.quantity, pm_ra_cosdec=self.PMRA.uncertainty, pm_dec=self.PMDEC.uncertainty, frame=coords.ICRS, ) <NEW_LINE> c_ECL = c.transform_to(PulsarEcliptic(ecl=ecl)) <NEW_LINE> m_ecl.PMELONG.uncertainty = c_ECL.pm_lon_coslat <NEW_LINE> m_ecl.PMELAT.uncertainty = c_ECL.pm_lat <NEW_LINE> m_ecl.ELONG.frozen = self.RAJ.frozen <NEW_LINE> m_ecl.ELAT.frozen = self.DECJ.frozen <NEW_LINE> m_ecl.PMELONG.frozen = self.PMRA.frozen <NEW_LINE> m_ecl.PMELAT.frozen = self.PMDEC.frozen <NEW_LINE> return m_ecl
Return pint.models.astrometry.Astrometry object in PulsarEcliptic frame. Parameters ---------- epoch : `astropy.time.Time` or Float, optional new epoch for position. If Float, MJD(TDB) is assumed. Note that uncertainties are not adjusted. ecl : str, optional Obliquity for PulsarEcliptic frame Returns ------- pint.models.astrometry.AstrometryEcliptic
625941b850812a4eaa59c185
def shot_result(self, shot): <NEW_LINE> <INDENT> if self.players[0].turn: <NEW_LINE> <INDENT> for j in range(len(Grid.GRID_X)): <NEW_LINE> <INDENT> for i in range(len(Grid.GRID_Y)): <NEW_LINE> <INDENT> if self.players[1].grid.primary[j][i].x + self.players[1].grid.primary[j][i].y == shot: <NEW_LINE> <INDENT> if self.players[1].grid.primary[j][i].state == Grid.EMPTY: <NEW_LINE> <INDENT> self.players[1].grid.primary[j][i].state = Grid.MISS <NEW_LINE> self.players[0].grid.tracking[j][i].state = Grid.MISS <NEW_LINE> return None <NEW_LINE> <DEDENT> elif self.players[1].grid.primary[j][i].state == Grid.SHIP: <NEW_LINE> <INDENT> ship = self.players[1].grid.primary[j][i].ship <NEW_LINE> self.players[1].grid.primary[j][i].state = Grid.SHIP_HIT <NEW_LINE> self.players[1].grid.primary[j][i].ship = None <NEW_LINE> self.players[0].grid.tracking[j][i].state = Grid.HIT <NEW_LINE> return ship <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> for j in range(len(Grid.GRID_X)): <NEW_LINE> <INDENT> for i in range(len(Grid.GRID_Y)): <NEW_LINE> <INDENT> if self.players[0].grid.primary[j][i].x + self.players[0].grid.primary[j][i].y == shot: <NEW_LINE> <INDENT> if self.players[0].grid.primary[j][i].state == Grid.EMPTY: <NEW_LINE> <INDENT> self.players[0].grid.primary[j][i].state = Grid.MISS <NEW_LINE> self.players[1].grid.tracking[j][i].state = Grid.MISS <NEW_LINE> return None <NEW_LINE> <DEDENT> elif self.players[0].grid.primary[j][i].state == Grid.SHIP: <NEW_LINE> <INDENT> ship = self.players[0].grid.primary[j][i].ship <NEW_LINE> self.players[0].grid.primary[j][i].state = Grid.SHIP_HIT <NEW_LINE> self.players[0].grid.primary[j][i].ship = None <NEW_LINE> self.players[1].grid.tracking[j][i].state = Grid.HIT <NEW_LINE> return ship
Makes the changes to the grid according to the shot result
625941b830dc7b76659017ca
def send_and_read(self, packet, endpoint, timeout=10): <NEW_LINE> <INDENT> queue = self.get_endpoint_queue(endpoint) <NEW_LINE> self.send_packet(packet) <NEW_LINE> try: <NEW_LINE> <INDENT> return queue.get(timeout=timeout) <NEW_LINE> <DEDENT> finally: <NEW_LINE> <INDENT> queue.close()
Sends a packet, then returns the next response received from that endpoint. This method sets up a listener before it actually sends the message, avoiding a potential race. .. warning:: Avoid calling this method from an endpoint callback; doing so is likely to lead to deadlock. :param packet: The message to send. :type packet: .PebblePacket :param endpoint: The endpoint to read from :type endpoint: .PacketType :param timeout: The maximum time to wait before raising :exc:`.TimeoutError`. :return: The message read from the endpoint; of the same type as passed to ``endpoint``.
625941b8a79ad161976cbfa5
def get_path(self, state, request, defaultURL=None, **kwargs): <NEW_LINE> <INDENT> if self.path and self.path.startswith('fsm:fsm_'): <NEW_LINE> <INDENT> return reverse(self.path, kwargs=dict(node_id=self.pk)) <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> func = self._plugin.get_path <NEW_LINE> <DEDENT> except AttributeError: <NEW_LINE> <INDENT> if not self.path: <NEW_LINE> <INDENT> if defaultURL: <NEW_LINE> <INDENT> return defaultURL <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError('node has no path, and no defaultURL') <NEW_LINE> <DEDENT> <DEDENT> kwargs.update(state.get_all_state_data()) <NEW_LINE> return reverse_path_args(self.path, request.path, **kwargs) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return func(self, state, request, defaultURL=defaultURL, **kwargs)
Get URL for this page.
625941b88e71fb1e9831d60d
def cast(*args): <NEW_LINE> <INDENT> return _itkInvertIntensityImageFilterPython.itkInvertIntensityImageFilterID3ID3_cast(*args)
cast(itkLightObject obj) -> itkInvertIntensityImageFilterID3ID3
625941b866656f66f7cbc00a
@pytest.fixture(autouse=True) <NEW_LINE> def md_repo(tmpdir): <NEW_LINE> <INDENT> repo_dir = tmpdir.mkdir('md_repo') <NEW_LINE> repo_dir.join('foo').write('foo') <NEW_LINE> subprocess.check_call(['hg', 'init'], cwd=repo_dir.strpath) <NEW_LINE> subprocess.check_call(['hg', 'commit', '-q', '-m', 'foo', '-A'], cwd=repo_dir.strpath) <NEW_LINE> return repo_dir
A mock of our malware domains repo
625941b8e1aae11d1e749b14
def create_or_edit(self, id, seq, resource): <NEW_LINE> <INDENT> schema = HighlightSchema(exclude=('id', 'seq')) <NEW_LINE> json = self.service.encode(schema, resource) <NEW_LINE> schema = HighlightSchema() <NEW_LINE> resp = self.service.edit(self._base(id, seq), resource.line, json) <NEW_LINE> return self.service.decode(schema, resp)
Create or edit a highlight. :param id: Result ID as an int. :param seq: TestResult sequence ID as an int. :param resource: :class:`highlights.Highlight <highlights.Highlight>` object :return: :class:`highlights.Highlight <highlights.Highlight>` object :rtype: highlights.Highlight
625941b86e29344779a62475
def test_et_al(self): <NEW_LINE> <INDENT> text = 'Costa et al. reported the growth of HA nanowires due to the chemical potential of an amorphous calcium phosphate solution. This structural feature would make the {001} very sensitive to surrounding growth conditions.' <NEW_LINE> sents = [ 'Costa et al. reported the growth of HA nanowires due to the chemical potential of an amorphous calcium phosphate solution.', 'This structural feature would make the {001} very sensitive to surrounding growth conditions.' ] <NEW_LINE> self.assertEqual(sents, self.ps.tokenize(text))
Test the tokenizer handles et al. within a sentence correctly.
625941b88a349b6b435e7fd4
def __init__(self, name=None, aws_service_name=None): <NEW_LINE> <INDENT> self._name = None <NEW_LINE> self._aws_service_name = None <NEW_LINE> self.discriminator = None <NEW_LINE> self.name = name <NEW_LINE> self.aws_service_name = aws_service_name
AwsServiceRegionDiscoveryMethod - a model defined in Swagger
625941b87b180e01f3dc4666
def test_get_reply(self): <NEW_LINE> <INDENT> ret_value = jsonutils.dumps({self.op_id: 'foo_value'}) <NEW_LINE> with mock.patch.object( ovsdb_writer.OVSDBWriter, '_recv_data', return_value=jsonutils.dumps({self.op_id: 'foo_value'})) as recv_data, mock.patch.object(ovsdb_writer.OVSDBWriter, '_process_response', return_value=(ret_value, None)) as proc_response, mock.patch.object(ovsdb_writer.LOG, 'debug'): <NEW_LINE> <INDENT> self.l2gw_ovsdb._get_reply(self.op_id, mock.ANY) <NEW_LINE> self.assertTrue(recv_data.called) <NEW_LINE> self.assertTrue(proc_response.called)
Test case to test _get_reply.
625941b88e7ae83300e4ae2c
def load_from_setup(self): <NEW_LINE> <INDENT> data =pd.read_csv("setup.csv") <NEW_LINE> N = len(list(data['Species'])) <NEW_LINE> for i in range(N): <NEW_LINE> <INDENT> name = list(data['Species'])[i] <NEW_LINE> n0 = list(data['Initial cond'])[i] <NEW_LINE> k = list(data['Growth rate'])[i] <NEW_LINE> K = list(data['Carrying cap'])[i] <NEW_LINE> c = list(data['Change rate'])[i] <NEW_LINE> interactions = list(data['A_row'+str(i)]) <NEW_LINE> self.addSpecies(name) <NEW_LINE> self.setInitialCond(name, n0) <NEW_LINE> self.setGrowthRate(name, k) <NEW_LINE> self.setCarrCap(name, K) <NEW_LINE> self.setChangeRate(name, c) <NEW_LINE> for j in range(N): <NEW_LINE> <INDENT> if not (i==j): <NEW_LINE> <INDENT> self.setInteraction(name, list(data['Species'])[j], interactions[j])
This method initializes the system with the data stored in the setup file currently present in the directory.
625941b82eb69b55b151c70b
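A sketch of a setup.csv layout that load_from_setup() above could parse; the column names are taken from the code, while the species and numbers are purely illustrative.

import pandas as pd

setup = pd.DataFrame({
    "Species": ["prey", "predator"],
    "Initial cond": [10.0, 2.0],
    "Growth rate": [1.0, -0.5],
    "Carrying cap": [100.0, 50.0],
    "Change rate": [0.1, 0.1],
    "A_row0": [0.0, -0.02],   # interaction terms read for species 0
    "A_row1": [0.01, 0.0],    # interaction terms read for species 1
})
setup.to_csv("setup.csv", index=False)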
def main(): <NEW_LINE> <INDENT> theta = gs.pi / 6 <NEW_LINE> initial_tangent_vec = gs.array( [[0.0, -theta, 0.5], [theta, 0.0, 0.5], [0.0, 0.0, 0.0]] ) <NEW_LINE> t = gs.linspace(-2.0, 2.0, N_STEPS + 1) <NEW_LINE> tangent_vec = gs.einsum("t,ij->tij", t, initial_tangent_vec) <NEW_LINE> group_geo_points = SE2_GROUP.exp(tangent_vec) <NEW_LINE> left_geo_points = LEFT_METRIC.exp(tangent_vec) <NEW_LINE> right_geo_points = RIGHT_METRIC.exp(tangent_vec) <NEW_LINE> ax = visualization.plot( group_geo_points, space="SE2_GROUP", color="black", label="Group" ) <NEW_LINE> ax = visualization.plot( left_geo_points, ax=ax, space="SE2_GROUP", color="yellow", label="Left" ) <NEW_LINE> ax = visualization.plot( right_geo_points, ax=ax, space="SE2_GROUP", color="green", label="Right by Integration", ) <NEW_LINE> ax.set_aspect("equal") <NEW_LINE> plt.legend(loc="best") <NEW_LINE> plt.show()
Plot geodesics on SE(2) with different structures.
625941b856b00c62f0f144be
def save(self, product): <NEW_LINE> <INDENT> if product.name is None: <NEW_LINE> <INDENT> raise DataValidationError('name attribute is not set and it is required') <NEW_LINE> <DEDENT> if product.id <= 0: <NEW_LINE> <INDENT> product.set_id(self.next_index()) <NEW_LINE> <DEDENT> self.redis.set(product.id, pickle.dumps(product.serialize()))
Saves a Product to the data store. This covers both saving a new Product and updating an existing Product with the same id.
625941b866673b3332b91ef7
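An illustrative sketch of the storage format implied by save() above: each product is stored as a pickled dict under its integer id. The field names in the dict are assumptions about what serialize() returns.

import pickle

serialized = {"id": 1, "name": "hammer", "price": 9.99}   # assumed serialize() output
blob = pickle.dumps(serialized)       # value written by self.redis.set(product.id, blob)
restored = pickle.loads(blob)         # value a matching lookup would unpickle
print(restored["name"])               # -> "hammer"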
def update_release(self, record: Record) -> None: <NEW_LINE> <INDENT> if record.version == Version.zero(): <NEW_LINE> <INDENT> self._unreleased.body = record.body <NEW_LINE> return <NEW_LINE> <DEDENT> for old_record in self.released: <NEW_LINE> <INDENT> if record.version == old_record.version: <NEW_LINE> <INDENT> old_record.body = record.body <NEW_LINE> old_record.created = record.created <NEW_LINE> self.format_released() <NEW_LINE> return <NEW_LINE> <DEDENT> <DEDENT> self.add_release(record)
Add or update release `record`. Arguments: record -- Record to update.
625941b867a9b606de4a7d1d
def experimental_packages(): <NEW_LINE> <INDENT> return _package_lists_from_sage_output('experimental')
Return two lists. The first contains the installed and the second contains the not-installed experimental packages that are available from the Sage repository. You must have an internet connection. OUTPUT: - installed experimental packages (as a list) - NOT installed experimental packages (as a list) Use ``install_package(package_name)`` to install or re-install a given package. .. seealso:: :func:`install_package`, :func:`upgrade` EXAMPLE:: sage: from sage.misc.package import experimental_packages sage: installed, not_installed = experimental_packages() # optional internet sage: min(installed+not_installed) # optional internet 'PyQt4' sage: max(installed+not_installed) # optional internet 'yassl'
625941b850812a4eaa59c186
def itemmeta(self): <NEW_LINE> <INDENT> raise NotSupportedError()
There is no metadata available for :class:`Loggers`. Any call to this method raises a :class:`NotSupportedError`. :raises: :class:`NotSupportedError`
625941b85fdd1c0f98dc0092
def dataToValue(mobj, mplug=None): <NEW_LINE> <INDENT> dtype = mobj.apiType() <NEW_LINE> proc = _DATATOVAL_DICT_get(dtype) <NEW_LINE> if proc: <NEW_LINE> <INDENT> return proc(mobj) <NEW_LINE> <DEDENT> if dtype != _MFn_kInvalid: <NEW_LINE> <INDENT> typ = _DATA_APITYPE_TYPE_DICT_get(dtype) <NEW_LINE> if typ: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> return _dataValueByParsing(mobj, typ, mplug) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> raise ValueError('data type not supported: ' + mobj.apiTypeStr)
Get a value from a data MObject.
625941b844b2445a33931f00
def __init__(self): <NEW_LINE> <INDENT> self._content = []
Create a new, empty Stack self. Overrides Container.__init__
625941b89c8ee82313fbb5d5
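A minimal Stack sketch consistent with the constructor above; the push/pop/is_empty methods are assumed for illustration and are not part of the original entry.

class Stack:
    def __init__(self):
        self._content = []
    def push(self, item):
        self._content.append(item)
    def pop(self):
        return self._content.pop()
    def is_empty(self):
        return len(self._content) == 0

s = Stack()
s.push(3)
s.push(7)
print(s.pop())       # -> 7
print(s.is_empty())  # -> False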
def is_with_faces(self, img: Image): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> if self._is_selfie(*self._find_faces(img)): <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> print(e) <NEW_LINE> return True
Indicates whether there were faces detected on the image :param img: Image object :return: boolean indicator: True if there are faces on the image
625941b83d592f4c4ed1cee0
def group(self, *columns): <NEW_LINE> <INDENT> for column in columns: <NEW_LINE> <INDENT> Query._add_sequence_or_value(column, self._group) <NEW_LINE> <DEDENT> self._str = None <NEW_LINE> return self
Adds one or more columns to be used in the query's GROUP BY clause Example usage: query.group('d.id', 'd.title')
625941b8f8510a7c17cf9565
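A hedged, self-contained sketch showing the chaining behaviour documented for group() above; the Query internals (_group, _str, _add_sequence_or_value) are only partly visible in the entry, so this stand-in simply mirrors the documented behaviour.

class Query:
    def __init__(self):
        self._group = []
        self._str = None      # cached query string, invalidated on change
    @staticmethod
    def _add_sequence_or_value(column, target):
        # Accept either a single column name or a sequence of them.
        if isinstance(column, (list, tuple, set)):
            target.extend(column)
        else:
            target.append(column)
    def group(self, *columns):
        for column in columns:
            Query._add_sequence_or_value(column, self._group)
        self._str = None
        return self

q = Query().group('d.id', 'd.title').group(['d.year'])
print(q._group)   # -> ['d.id', 'd.title', 'd.year']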