Columns: code (string, lengths 4–4.48k), docstring (string, lengths 1–6.45k), _id (string, length 24).
def approximate_spd( panel_properties: Dict[str, Any], des_distance: float, des_intensity: float, des_spectrum: Dict[str, float], ) -> Tuple[Dict, Dict, float]: <NEW_LINE> <INDENT> desired_spd_dict = calculate_spd_dict(des_intensity, des_spectrum) <NEW_LINE> desired_spd_vector = accessors.vectorize_dict(desired_spd_dict) <NEW_LINE> raw_channel_spd_ndict = build_channel_spd_ndict(panel_properties, des_distance) <NEW_LINE> channel_spd_ndict = translate_spd_ndict(raw_channel_spd_ndict, desired_spd_dict) <NEW_LINE> channel_spd_matrix = accessors.matrixify_nested_dict(channel_spd_ndict) <NEW_LINE> channel_setpoint_vector = solve_setpoints(channel_spd_matrix, desired_spd_vector) <NEW_LINE> channel_setpoint_list = [] <NEW_LINE> for setpoint in channel_setpoint_vector: <NEW_LINE> <INDENT> channel_setpoint_list.append(setpoint * 100) <NEW_LINE> <DEDENT> channel_types = panel_properties.get("channel_types", {}) <NEW_LINE> channel_setpoint_dict = accessors.dictify_list(channel_setpoint_list, channel_types) <NEW_LINE> output_spd_list = calculate_output_spd(channel_spd_matrix, channel_setpoint_vector) <NEW_LINE> output_spectrum_list, output_intensity = deconstruct_spd(output_spd_list) <NEW_LINE> output_spectrum_dict = accessors.dictify_list( output_spectrum_list, desired_spd_dict ) <NEW_LINE> mapped_channel_setpoint_dict = {} <NEW_LINE> channels = panel_properties.get("channels", {}) <NEW_LINE> for channel_name, channel_entry in channels.items(): <NEW_LINE> <INDENT> key = channel_entry.get("type") <NEW_LINE> setpoint = channel_setpoint_dict.get(key, 0) <NEW_LINE> mapped_channel_setpoint_dict[channel_name] = round(setpoint, 2) <NEW_LINE> <DEDENT> return mapped_channel_setpoint_dict, output_spectrum_dict, output_intensity
Approximates spectral power distribution.
625941b85fcc89381b1e150f
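Read as math, the pipeline above assembles a channel-SPD matrix A (one column per channel type at the requested distance — an assumption about the matrixify orientation) and a desired-SPD vector b, then solves for fractional setpoints x. solve_setpoints is not shown, so the bounded least-squares form below is an assumption, not the confirmed solver:

\[ \min_{0 \le x \le 1} \lVert A x - b \rVert_2, \qquad \text{output SPD} = A x \]

The solved fractions are then scaled by 100 into percentage setpoints and rounded to two decimals per channel name.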
def process_links(txt, prefix): <NEW_LINE> <INDENT> txt = re.sub(r'[^\[]{0,2}(attachment:.*)[^\]]{0,2}', '[[' + "\\1" + ']]', txt) <NEW_LINE> txt = re.sub(r'\[\[(.*?)\]\]', lambda matchobj: '[[' + convert_link(matchobj, prefix) + ']]', txt) <NEW_LINE> if prefix: <NEW_LINE> <INDENT> wikiword_re = re.compile(r'(\s)([A-Z][a-z]+[A-Z]+[a-zA-Z]+)') <NEW_LINE> txt = wikiword_re.sub(r'\1[[' + prefix + '/' + r'\2]]', txt) <NEW_LINE> <DEDENT> return txt
convert links to moin syntax
625941b8498bea3a759b98fc
def step(self, step=None): <NEW_LINE> <INDENT> activearrays = self.pre_step(step) <NEW_LINE> h0 = red2comp( activearrays["hessian"], self.im.dbeads.nbeads, self.im.dbeads.natoms, self.im.coef, ) <NEW_LINE> h1 = np.add(self.im.h, h0) <NEW_LINE> d, w = clean_hessian( h1, self.im.dbeads.q, self.im.dbeads.natoms, self.im.dbeads.nbeads, self.im.dbeads.m, self.im.dbeads.m3, self.options["hessian_asr"], ) <NEW_LINE> info( "\n@Nichols: 1st freq {} cm^-1".format( units.unit_to_user( "frequency", "inversecm", np.sign(d[0]) * np.sqrt(np.absolute(d[0])) ) ), verbosity.medium, ) <NEW_LINE> info( "@Nichols: 2nd freq {} cm^-1".format( units.unit_to_user( "frequency", "inversecm", np.sign(d[1]) * np.sqrt(np.absolute(d[1])) ) ), verbosity.medium, ) <NEW_LINE> info( "@Nichols: 3rd freq {} cm^-1".format( units.unit_to_user( "frequency", "inversecm", np.sign(d[2]) * np.sqrt(np.absolute(d[2])) ) ), verbosity.medium, ) <NEW_LINE> if self.options["mode"] == "rate": <NEW_LINE> <INDENT> f = activearrays["old_f"] * (self.im.coef[1:] + self.im.coef[:-1]) / 2 <NEW_LINE> d_x = nichols( f, self.im.f, d, w, self.im.dbeads.m3, activearrays["big_step"] ) <NEW_LINE> <DEDENT> elif self.options["mode"] == "splitting": <NEW_LINE> <INDENT> d_x = nichols( activearrays["old_f"], self.im.f, d, w, self.im.dbeads.m3, activearrays["big_step"], mode=0, ) <NEW_LINE> <DEDENT> if np.amax(np.absolute(d_x)) > activearrays["big_step"]: <NEW_LINE> <INDENT> info( "Step norm, scaled down to {}".format(activearrays["big_step"]), verbosity.low, ) <NEW_LINE> d_x *= activearrays["big_step"] / np.amax(np.absolute(d_x)) <NEW_LINE> <DEDENT> d_x_full = self.fix.get_full_vector(d_x, t=1) <NEW_LINE> new_x = self.optarrays["old_x"].copy() + d_x_full <NEW_LINE> self.post_step(step, new_x, d_x, activearrays)
Does one simulation time step.
625941b867a9b606de4a7d08
@utils.arg('--limit', metavar='<NUMBER>', default=20, help='Page limit') <NEW_LINE> @utils.arg('--offset', metavar='<OFFSET>', help='Page offset') <NEW_LINE> @utils.arg('--order-by', metavar='<ORDER_BY>', help='Name of fields order by') <NEW_LINE> @utils.arg('--order', metavar='<ORDER>', choices=['desc', 'asc'], help='order') <NEW_LINE> @utils.arg('--details', action='store_true', help='More detailed list') <NEW_LINE> @utils.arg('--search', metavar='<KEYWORD>', help='Filter result by simple keyword search') <NEW_LINE> @utils.arg('--meta', action='store_true', help='Piggyback metadata') <NEW_LINE> @utils.arg('--filter', metavar='<FILTER>', action='append', help='Filters') <NEW_LINE> @utils.arg('--filter-any', action='store_true', help='If true, match if any of the filters matches; otherwise, match if all of the filters match') <NEW_LINE> @utils.arg('--admin', action='store_true', help='Is admin call?') <NEW_LINE> @utils.arg('--tenant', metavar='<TENANT>', help='Tenant ID or Name') <NEW_LINE> @utils.arg('--field', metavar='<FIELD>', action='append', help='Show only specified fields') <NEW_LINE> def do_region_quota_list(client, args): <NEW_LINE> <INDENT> page_info = utils.get_paging_info(args) <NEW_LINE> regionquotas = client.regionquotas.list(**page_info) <NEW_LINE> utils.print_list(regionquotas, client.regionquotas.columns)
List all RegionQuota
625941b8a17c0f6771cbde9f
def do_fit( self, fit_function=None, x_data=None, y_data=None, channel_index=0, pixel_fit=False): <NEW_LINE> <INDENT> self.coord = None <NEW_LINE> if pixel_fit and np.count_nonzero(self.sweep_images) != 0: <NEW_LINE> <INDENT> frames = self.sweep_images / self.elapsed_sweeps <NEW_LINE> frames[:] = [cv2.flip(frame, 0) for frame in frames] <NEW_LINE> frames1 = np.zeros((np.shape(frames)[0], 600, 600)) <NEW_LINE> frames1[:] = [ cv2.resize( frame, (600, 600), interpolation=cv2.INTER_AREA) for frame in frames] <NEW_LINE> frames = frames1 <NEW_LINE> self.do_pixel_spectrum(frames) <NEW_LINE> if self.coord is not None: <NEW_LINE> <INDENT> x_data = self.odmr_plot_x <NEW_LINE> y_data = np.zeros( [len(self.get_odmr_channels()), self.odmr_plot_x.size]) <NEW_LINE> y_data[0] = frames[:, self.coord[0], self.coord[1]] <NEW_LINE> self.sigOdmrPlotsUpdated.emit( x_data, y_data, self.odmr_plot_xy) <NEW_LINE> y_data = y_data[0] <NEW_LINE> <DEDENT> <DEDENT> if not pixel_fit: <NEW_LINE> <INDENT> self.sigOdmrPlotsUpdated.emit( self.odmr_plot_x, self.odmr_plot_y, self.odmr_plot_xy) <NEW_LINE> <DEDENT> if (x_data is None) or (y_data is None): <NEW_LINE> <INDENT> x_data = self.odmr_plot_x <NEW_LINE> y_data = self.odmr_plot_y[channel_index] <NEW_LINE> <DEDENT> if fit_function is not None and isinstance(fit_function, str): <NEW_LINE> <INDENT> if fit_function in self.get_fit_functions(): <NEW_LINE> <INDENT> self.fc.set_current_fit(fit_function) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.fc.set_current_fit('No Fit') <NEW_LINE> if fit_function != 'No Fit': <NEW_LINE> <INDENT> self.log.warning( 'Fit function "{0}" not available in ODMRLogic fit container.' ''.format(fit_function)) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> self.odmr_fit_x, self.odmr_fit_y, result = self.fc.do_fit( x_data, y_data) <NEW_LINE> if result is None: <NEW_LINE> <INDENT> result_str_dict = {} <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> result_str_dict = result.result_str_dict <NEW_LINE> <DEDENT> self.sigOdmrFitUpdated.emit( self.odmr_fit_x, self.odmr_fit_y, result_str_dict, self.fc.current_fit) <NEW_LINE> return
Execute the currently configured fit on the measurement data. Optionally on passed data
625941b8dc8b845886cb5380
def get_bin_seeds(X, bin_size, min_bin_freq=1): <NEW_LINE> <INDENT> bin_sizes = defaultdict(int) <NEW_LINE> for point in X: <NEW_LINE> <INDENT> binned_point = np.round(point / bin_size) <NEW_LINE> bin_sizes[tuple(binned_point)] += 1 <NEW_LINE> <DEDENT> bin_seeds = np.array([point for point, freq in six.iteritems(bin_sizes) if freq >= min_bin_freq], dtype=np.float32) <NEW_LINE> if len(bin_seeds) == len(X): <NEW_LINE> <INDENT> warnings.warn("Binning data failed with provided bin_size=%f," " using data points as seeds." % bin_size) <NEW_LINE> return X <NEW_LINE> <DEDENT> bin_seeds = bin_seeds * bin_size <NEW_LINE> return bin_seeds
Finds seeds for mean_shift. Finds seeds by first binning data onto a grid whose lines are spaced bin_size apart, and then choosing those bins with at least min_bin_freq points. Parameters ---------- X : array-like, shape=[n_samples, n_features] Input points, the same points that will be used in mean_shift. bin_size : float Controls the coarseness of the binning. Smaller values lead to more seeding (which is computationally more expensive). If you're not sure how to set this, set it to the value of the bandwidth used in clustering.mean_shift. min_bin_freq : integer, optional Only bins with at least min_bin_freq will be selected as seeds. Raising this value decreases the number of seeds found, which makes mean_shift computationally cheaper. Returns ------- bin_seeds : array-like, shape=[n_samples, n_features] Points used as initial kernel positions in clustering.mean_shift.
625941b87047854f462a1258
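A minimal, self-contained sketch of the binning idea behind get_bin_seeds (the points and parameters are illustrative, not from the source):

import numpy as np
from collections import defaultdict

X = np.array([[0.10, 0.20], [0.15, 0.22], [5.0, 5.1]])
bin_size, min_bin_freq = 0.5, 2
bin_sizes = defaultdict(int)
for point in X:
    bin_sizes[tuple(np.round(point / bin_size))] += 1   # snap each point to a grid cell
seeds = np.array([p for p, f in bin_sizes.items() if f >= min_bin_freq]) * bin_size
print(seeds)   # [[0. 0.]] -- only the cell containing two points survives

Multiplying the surviving cell indices back by bin_size recovers grid coordinates, exactly as the function's final scaling does.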
def __len__(self): <NEW_LINE> <INDENT> return self.n
Return the number of nodes in the trie.
625941b8adb09d7d5db6c5de
def alias(self): <NEW_LINE> <INDENT> return _spacegrant_swig.hdlc_framer_sptr_alias(self)
alias(hdlc_framer_sptr self) -> std::string
625941b899fddb7c1c9de1de
def test_login(self): <NEW_LINE> <INDENT> authenticator = Authenticator({'test': HiveUser('test', 'test')}) <NEW_LINE> Session.authenticator = authenticator <NEW_LINE> sessions = {} <NEW_LINE> users = {'test': HiveUser('test', 'test')} <NEW_LINE> cap = http.http(sessions, {'enabled': 'True', 'port': 0}, users, self.work_dir) <NEW_LINE> socket = create_socket(('0.0.0.0', 0)) <NEW_LINE> srv = StreamServer(socket, cap.handle_session) <NEW_LINE> srv.start() <NEW_LINE> client = httplib.HTTPConnection('127.0.0.1', srv.server_port) <NEW_LINE> client.putrequest('GET', '/') <NEW_LINE> client.putheader('Authorization', 'Basic ' + base64.b64encode('test:test')) <NEW_LINE> client.endheaders() <NEW_LINE> response = client.getresponse() <NEW_LINE> self.assertEqual(response.status, 200) <NEW_LINE> srv.stop()
Tries to login using the username/password as test/test.
625941b899fddb7c1c9de1df
def set_stance(self, stance: Stance): <NEW_LINE> <INDENT> if isinstance(self.__toy, (R2D2, R2Q5)): <NEW_LINE> <INDENT> if stance == Stance.Bipod: <NEW_LINE> <INDENT> ToyUtil.perform_leg_action(self.__toy, R2LegActions.TWO_LEGS) <NEW_LINE> <DEDENT> elif stance == Stance.Tripod: <NEW_LINE> <INDENT> ToyUtil.perform_leg_action(self.__toy, R2LegActions.THREE_LEGS) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError(f'Stance {stance} is not supported')
Changes the stance between bipod and tripod. Set to bipod using ``set_stance(Stance.Bipod)`` and to tripod using ``set_stance(Stance.Tripod)``. Tripod is required for rolling.
625941b863b5f9789fde6f31
def recompute_grad(fn): <NEW_LINE> <INDENT> @functools.wraps(fn) <NEW_LINE> def wrapped(*args): <NEW_LINE> <INDENT> return _recompute_grad(fn, args) <NEW_LINE> <DEDENT> return wrapped
Decorator that recomputes the function on the backwards pass. Args: fn: a function that takes Tensors (all as positional arguments) and returns a tuple of Tensors. Returns: A wrapped fn that is identical to fn when called, but its activations will be discarded and recomputed on the backwards pass (i.e. on a call to tf.gradients).
625941b8097d151d1a222ca7
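A hypothetical usage sketch for the decorator, assuming the TF1-style graph API of the codebase it appears to come from (the layer size and placeholder shape are made up):

import tensorflow as tf

@recompute_grad
def block(x):
    # forward output is identical to the unwrapped function; activations
    # are recomputed during tf.gradients instead of being kept in memory
    return (tf.nn.relu(tf.layers.dense(x, 128)),)

x = tf.placeholder(tf.float32, [None, 64])
y, = block(x)
grads = tf.gradients(y, [x])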
def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)): <NEW_LINE> <INDENT> nb_filter1, nb_filter2, nb_filter3 = filters <NEW_LINE> conv_name_base = 'res' + str(stage) + block + '_branch' <NEW_LINE> bn_name_base = 'bn' + str(stage) + block + '_branch' <NEW_LINE> x = Convolution2D(nb_filter1, 1, 1, subsample=strides, name=conv_name_base + '2a')(input_tensor) <NEW_LINE> x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x) <NEW_LINE> x = Activation('relu')(x) <NEW_LINE> x = Convolution2D(nb_filter2, kernel_size, kernel_size, border_mode='same', name=conv_name_base + '2b')(x) <NEW_LINE> x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x) <NEW_LINE> x = Activation('relu')(x) <NEW_LINE> x = Convolution2D(nb_filter3, 1, 1, name=conv_name_base + '2c')(x) <NEW_LINE> x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x) <NEW_LINE> shortcut = Convolution2D(nb_filter3, 1, 1, subsample=strides, name=conv_name_base + '1')(input_tensor) <NEW_LINE> shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1')(shortcut) <NEW_LINE> x = add([x, shortcut], name = 'res'+str(stage)+block) <NEW_LINE> x = Activation('relu')(x) <NEW_LINE> return x
conv_block is the block that has a conv layer at shortcut # Arguments input_tensor: input tensor kernel_size: default 3, the kernel size of middle conv layer at main path filters: list of integers, the nb_filters of 3 conv layer at main path stage: integer, current stage label, used for generating layer names block: 'a','b'..., current block label, used for generating layer names Note that from stage 3, the first conv layer at main path is with subsample=(2,2) And the shortcut should have subsample=(2,2) as well
625941b8004d5f362079a182
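A hypothetical usage sketch, assuming the Keras-style layers the function relies on are importable and that bn_axis is defined at module level (both assumptions, since neither appears in the snippet):

from keras.layers import Input
from keras.models import Model

bn_axis = 3   # channels-last; conv_block reads this module-level name
inp = Input(shape=(56, 56, 64))
out = conv_block(inp, 3, [64, 64, 256], stage=2, block='a')
model = Model(inp, out)   # one projection-shortcut block; spatial dims halved by strides=(2, 2)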
def random_seeds(size, entropy=None): <NEW_LINE> <INDENT> return np.random.SeedSequence(entropy).generate_state(size)
Generates a sequence of most likely independent seeds.
625941b8e5267d203edcdaec
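A usage sketch: spawn one generator per worker from the returned seed array (the entropy value is illustrative):

import numpy as np

seeds = random_seeds(4, entropy=1234)                  # four uint32 words, reproducible
rngs = [np.random.default_rng(int(s)) for s in seeds]
samples = [rng.normal() for rng in rngs]               # streams are effectively independent

Passing entropy=None instead lets SeedSequence pull fresh OS entropy, so each call yields a different, still well-separated set of seeds.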
def _testLoadPinout(): <NEW_LINE> <INDENT> print(icGenerator.loadPinout("pinoutTest.ods"))
test for loadPinout function
625941b8925a0f43d2549cbf
def began_convergence_lossfun(y_true, y_pred, gamma): <NEW_LINE> <INDENT> x_hat = y_pred[..., 0] <NEW_LINE> x_hat_reconstructed = y_pred[..., 1] <NEW_LINE> x_real = y_true[..., 0] <NEW_LINE> x_real_reconstructed = y_pred[..., 2] <NEW_LINE> fake_ae_loss = K.mean(K.abs(x_hat - x_hat_reconstructed)) <NEW_LINE> real_ae_loss = K.mean(K.abs(x_real - x_real_reconstructed)) <NEW_LINE> return real_ae_loss + K.abs(gamma * real_ae_loss - fake_ae_loss)
y_pred[:,0]: (Gx(z)) y_pred[:,1]: D(Gx(z)) y_pred[:,2]: D(x) y_true: x
625941b8d18da76e2353231d
def add_handler(self, name: str): <NEW_LINE> <INDENT> if name in self.handlers: <NEW_LINE> <INDENT> return self.handlers[name] <NEW_LINE> <DEDENT> self.handlers[name] = handler = Handler(name) <NEW_LINE> self.add_function(name, lambda nme, inp: Handler._recursive_handle(handler, nme, inp)) <NEW_LINE> return handler
If a handler hasn't been added, a new handler is created. A function is also added in order to mimic a recursive call to handle. If a handler has previously been added, that instance is returned. :param name: The name for the handler :return: A handler
625941b85166f23b2e1a4fa4
def nvidia_model_small(): <NEW_LINE> <INDENT> model = Sequential() <NEW_LINE> model.add(Conv2D(8, (5, 5), strides=(2, 2), activation="relu", input_shape=(67, 320, 1))) <NEW_LINE> model.add(Conv2D(12, (5, 5), strides=(2, 2), activation="relu")) <NEW_LINE> model.add(Conv2D(16, (5, 5), strides=(2, 2), activation="relu")) <NEW_LINE> model.add(Conv2D(24, (3, 3), strides=(1, 1), activation="relu")) <NEW_LINE> model.add(Conv2D(24, (3, 3), strides=(1, 1), activation="relu")) <NEW_LINE> model.add(Flatten()) <NEW_LINE> model.add(Dense(100, activation="relu")) <NEW_LINE> model.add(Dense(50, activation="relu")) <NEW_LINE> model.add(Dense(10, activation="relu")) <NEW_LINE> model.add(Dense(1)) <NEW_LINE> model.compile(optimizer="adam", loss="mse", metrics=['accuracy']) <NEW_LINE> return model
Designed for single layer grayscale input.
625941b8ad47b63b2c509dd5
def setmem(vm_, memory, config=False, **kwargs): <NEW_LINE> <INDENT> conn = __get_conn(**kwargs) <NEW_LINE> dom = _get_domain(conn, vm_) <NEW_LINE> if VIRT_STATE_NAME_MAP.get(dom.info()[0], "unknown") != "shutdown": <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> flags = libvirt.VIR_DOMAIN_MEM_MAXIMUM <NEW_LINE> if config: <NEW_LINE> <INDENT> flags = flags | libvirt.VIR_DOMAIN_AFFECT_CONFIG <NEW_LINE> <DEDENT> ret1 = dom.setMemoryFlags(memory * 1024, flags) <NEW_LINE> ret2 = dom.setMemoryFlags(memory * 1024, libvirt.VIR_DOMAIN_AFFECT_CURRENT) <NEW_LINE> conn.close() <NEW_LINE> return ret1 == ret2 == 0
Changes the amount of memory allocated to VM. The VM must be shutdown for this to work. :param vm_: name of the domain :param memory: memory amount to set in MB :param config: if True then libvirt will be asked to modify the config as well :param connection: libvirt connection URI, overriding defaults .. versionadded:: 2019.2.0 :param username: username to connect with, overriding defaults .. versionadded:: 2019.2.0 :param password: password to connect with, overriding defaults .. versionadded:: 2019.2.0 CLI Example: .. code-block:: bash salt '*' virt.setmem <domain> <size> salt '*' virt.setmem my_domain 768
625941b8009cb60464c63208
def pmf(self, k): <NEW_LINE> <INDENT> if (k > self.m) | (k != int(k)): <NEW_LINE> <INDENT> raise ValueError("k must be an integer between 0 and m, inclusive") <NEW_LINE> <DEDENT> if self.p == 1: <NEW_LINE> <INDENT> p_k = 1 if k == self.m else 0 <NEW_LINE> <DEDENT> elif self.p == 0: <NEW_LINE> <INDENT> p_k = 1 if k == 0 else 0 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> p_k = np.exp((self.nu * log(comb(self.m, k))) + (k*log(self.p)) + ((self.m-k) * log(1-self.p)))/self.normaliser <NEW_LINE> <DEDENT> return p_k
Probability mass function. Uses exponents and logs to avoid overflow. Arguments: self, ConwayMaxwellBinomial object, k, int, must be an integer in the interval [0, m] Returns: P(k)
625941b8046cf37aa974cb96
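In closed form, the mass function evaluated in log-space above is (writing Z for self.normaliser and nu for self.nu):

\[ P(k) \;=\; \frac{1}{Z}\,\binom{m}{k}^{\nu}\, p^{k}\,(1-p)^{m-k}, \qquad k = 0, 1, \dots, m \]

with nu = 1 recovering the ordinary binomial; the exp/log arrangement in the code keeps the binomial coefficient from overflowing for large m.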
@task <NEW_LINE> def seed_kafka(kafka_hosts="streamparse-box:9092", topic_name="pixels", num_pixels=100000): <NEW_LINE> <INDENT> kafka = KafkaClient(kafka_hosts) <NEW_LINE> producer = SimpleProducer(kafka) <NEW_LINE> puts("Seeding Kafka ({}) topic '{}' with {:,} fake pixels..." .format(kafka_hosts, topic_name, num_pixels)) <NEW_LINE> pixels = random_pixel_generator() <NEW_LINE> for i in range(num_pixels): <NEW_LINE> <INDENT> pixel = json.dumps(next(pixels)).encode("utf-8", "ignore") <NEW_LINE> try: <NEW_LINE> <INDENT> producer.send_messages(topic_name, pixel) <NEW_LINE> <DEDENT> except UnknownTopicOrPartitionError: <NEW_LINE> <INDENT> puts('Topic did not exist yet, so sleeping and trying again...', flush=True) <NEW_LINE> time.sleep(3) <NEW_LINE> <DEDENT> puts(i, end='\r', flush=True)
Seed the local Kafka cluster's "pixels" topic with sample pixel data.
625941b876d4e153a657e97c
def p_c_p(t): <NEW_LINE> <INDENT> t[0] = ExpresionBinaria(t[2], t[3], OPERACION_RELACIONAL.IGUAL) <NEW_LINE> global gramatical <NEW_LINE> gramatical.append( " expresion.val := expresion.val == expresion.val")
c_p : IGUALQUE e c_p
625941b8b7558d58953c4d67
def compare_timestamps(ts1: str, ts2: str) -> bool: <NEW_LINE> <INDENT> ts1_head, ts1_pred = ts1.split("_") <NEW_LINE> ts2_head, ts2_pred = ts2.split("_") <NEW_LINE> return int(ts1_head) > int(ts2_head) or int(ts1_pred) > int(ts2_pred)
Compares the timestamp of two combined timestamp strings and determines if the first one is newer than the second one. Args: ts1: the first combined timestamp string ts2: the second combined timestamp string Returns: True if ``ts1`` is newer than ``ts2``
625941b87b180e01f3dc4651
def runBlocking(self,selector=selectors.DefaultSelector): <NEW_LINE> <INDENT> if self._is_started: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> self.initialize() <NEW_LINE> self.connect() <NEW_LINE> with self._selector_lock: <NEW_LINE> <INDENT> if self._is_started: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> self.selector = selector() <NEW_LINE> self.selector.register(self.sock,selectors.EVENT_READ,[self._sock_ready,self]) <NEW_LINE> self.selector.register(self._irqrecv,selectors.EVENT_READ,[self._sock_ready,None]) <NEW_LINE> self.selector.register(self._irqsend,selectors.EVENT_READ,[self._sock_ready,None]) <NEW_LINE> self._is_started = True <NEW_LINE> self.sendEvent("peng3dnet:client.start",{}) <NEW_LINE> <DEDENT> while self.run: <NEW_LINE> <INDENT> events = self.selector.select() <NEW_LINE> for key,mask in events: <NEW_LINE> <INDENT> callback,data = key.data <NEW_LINE> try: <NEW_LINE> <INDENT> callback(key.fileobj, mask, data) <NEW_LINE> <DEDENT> except Exception: <NEW_LINE> <INDENT> import traceback;traceback.print_exc()
Runs the client main loop in a blocking manner. ``selector`` may be changed to override the selector used for smart waiting. This method blocks until :py:meth:`stop()` is called.
625941b823849d37ff7b2edd
def multi_lasso_plot(X,CO_response,ethylene_response,supress=False): <NEW_LINE> <INDENT> from sklearn import linear_model <NEW_LINE> y=[] <NEW_LINE> for i in range(0,395): <NEW_LINE> <INDENT> y.append([CO_response[i],ethylene_response[i]]) <NEW_LINE> <DEDENT> model2 = linear_model.MultiTaskLasso(alpha=1200) <NEW_LINE> n=int(len(CO_response)*0.7) <NEW_LINE> if isinstance(X,pd.core.frame.DataFrame): <NEW_LINE> <INDENT> X=np.asarray(X) <NEW_LINE> <DEDENT> X_train=X[:n,:] <NEW_LINE> y_train=y[:n] <NEW_LINE> X_test=X[n:,:] <NEW_LINE> y_test=y[n:] <NEW_LINE> model2.fit(X_train, y_train) <NEW_LINE> ypred=model2.predict(X_test) <NEW_LINE> co_pred=[] <NEW_LINE> eth_pred=[] <NEW_LINE> for i in range(0,len(ypred)): <NEW_LINE> <INDENT> co_pred.append(ypred[i][0]) <NEW_LINE> eth_pred.append(ypred[i][1]) <NEW_LINE> <DEDENT> if supress==False : <NEW_LINE> <INDENT> plt.figure(1,figsize=(30,20)) <NEW_LINE> plt.plot(CO_response[n:],color='blue') <NEW_LINE> plt.plot(co_pred,color='red') <NEW_LINE> plt.legend(['CO_pred', 'CO_true'],fontsize=36) <NEW_LINE> plt.figure(2,figsize=(30,20)) <NEW_LINE> plt.plot(ethylene_response[n:],color='orange') <NEW_LINE> plt.plot(eth_pred,color='purple') <NEW_LINE> plt.legend(['eth_pred', 'eth_true'],fontsize=36) <NEW_LINE> plt.show() <NEW_LINE> <DEDENT> return [co_pred,eth_pred]
Multi Task Lasso # X is the (m x n) feature vector. This can be an array or a pandas data frame. # CO_response is the m dimensional vector with the true CO values # ethylene_response is the m dimensional vector with the true ethylene values # If supress is True, the plots are suppressed.
625941b85166f23b2e1a4fa5
def __init__(self, explicit=False, is_run_shell=True): <NEW_LINE> <INDENT> self.platform_id = 2 <NEW_LINE> if sys.platform == 'win32': <NEW_LINE> <INDENT> self.platform_id = 0 <NEW_LINE> <DEDENT> if sys.platform == 'darwin': <NEW_LINE> <INDENT> self.platform_id = 1 <NEW_LINE> <DEDENT> info = publishinfo() <NEW_LINE> name = os.path.basename(info[0]) <NEW_LINE> name = name.replace('.py','') <NEW_LINE> module = info[1] <NEW_LINE> module = str(module).replace('<','') <NEW_LINE> module = module.replace('>','') <NEW_LINE> name += '_' + module <NEW_LINE> self.source_startrow = info[2] - 1 <NEW_LINE> self.explicit = explicit <NEW_LINE> self.t_html = '' <NEW_LINE> f = open(os.path.dirname(__file__)+'/test.html', 'r') <NEW_LINE> self.t_html = f.read() <NEW_LINE> if agl.IsRunAtCmd() or sys.version>'3': <NEW_LINE> <INDENT> self.t_html = self.t_html.replace('utf-8', 'gb2312') <NEW_LINE> <DEDENT> f.close() <NEW_LINE> self.name = name <NEW_LINE> self.path = os.getcwd() + "/html/" <NEW_LINE> if not os.path.isdir("html"): <NEW_LINE> <INDENT> os.mkdir("html") <NEW_LINE> <DEDENT> self.redirect_fname = 'html/log'+str(os.getpid())+'.txt' <NEW_LINE> if self.platform_id == 0: <NEW_LINE> <INDENT> self.logfile = open(self.redirect_fname, "w") <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.logfile = codecs.open(self.redirect_fname,'w', encoding='utf_16') <NEW_LINE> <DEDENT> self.oldstdout = sys.stdout <NEW_LINE> sys.stdout = self.logfile <NEW_LINE> self.figs = [] <NEW_LINE> self.imgs = [] <NEW_LINE> self.use_figure = False <NEW_LINE> self.myimgs = '' <NEW_LINE> self.is_run_shell = is_run_shell
The encoding defaults to utf8. info: the tuple obtained from publishinfo. explicit: whether the pop-up page is invoked explicitly. is_run_shell: bool, whether to run the generated start
625941b80a50d4780f666cdb
def _cartesian_to_llh(x, y, z, model): <NEW_LINE> <INDENT> a, _, _, e2 = ELLIPSOID_MODELS[model] <NEW_LINE> p = math.sqrt(x*x+y*y) <NEW_LINE> lam = math.atan2(y, x) <NEW_LINE> phi = math.atan2(z, p*(1-e2)) <NEW_LINE> while True: <NEW_LINE> <INDENT> sp = math.sin(phi) <NEW_LINE> nu = a / math.sqrt(1 - e2*sp*sp) <NEW_LINE> oldphi = phi <NEW_LINE> phi = math.atan2(z+e2*nu*sp, p) <NEW_LINE> if abs(oldphi-phi) < 1E-12: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> <DEDENT> return ( phi * 57.29577951308232087679815481410517, lam * 57.29577951308232087679815481410517, p/math.cos(phi) - nu )
Approximate conversion from plane to spherical coordinates. Used as part of the Helmert transformation used outside the OSTN02 area. >>> _cartesian_to_llh(3841039.2016489909, -201300.3346975291, 5070178.453880735, 'OSGB36') (53.0, -3.0, 10.0)
625941b85fdd1c0f98dc007d
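The loop is the standard iterative latitude recovery for an ellipsoid; in symbols, with a and e^2 taken from the ellipsoid model:

\[ p = \sqrt{x^2 + y^2}, \quad \lambda = \operatorname{atan2}(y, x), \quad \nu = \frac{a}{\sqrt{1 - e^2 \sin^2\phi}}, \quad \phi \leftarrow \operatorname{atan2}\bigl(z + e^2 \nu \sin\phi,\; p\bigr) \]

iterated until phi moves by less than 1e-12, with height h = p / cos(phi) - nu. The long constant 57.29577951... is 180/pi, converting radians to degrees.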
def get_Description(self): <NEW_LINE> <INDENT> return super(IServerObjectConfiguration, self).get_Description()
Method IServerObjectConfiguration.get_Description OUTPUT desc : BSTR*
625941b891af0d3eaac9b860
def say_hi(name, age): <NEW_LINE> <INDENT> print("Hello " + name + "! you are " + age + " years old.")
name = input("Enter Name: ")
625941b80a366e3fb873e663
def _select_last_modified_file(self): <NEW_LINE> <INDENT> role = self.model.DateModifiedRole <NEW_LINE> view = self.widgets["list"] <NEW_LINE> model = view.model() <NEW_LINE> highest_index = None <NEW_LINE> highest = 0 <NEW_LINE> for row in range(model.rowCount()): <NEW_LINE> <INDENT> index = model.index(row, 0, parent=QtCore.QModelIndex()) <NEW_LINE> if not index.isValid(): <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> modified = index.data(role) <NEW_LINE> if modified > highest: <NEW_LINE> <INDENT> highest_index = index <NEW_LINE> highest = modified <NEW_LINE> <DEDENT> <DEDENT> if highest_index: <NEW_LINE> <INDENT> view.setCurrentIndex(highest_index)
Utility function to select the file with latest date modified
625941b855399d3f055884ff
def test_set_new_password_for_own_user(self): <NEW_LINE> <INDENT> self.send_request('GET', 'users', username=self.testuser['username'], password=self.testuser['password']) <NEW_LINE> initial_pass = self.testuser['password'] <NEW_LINE> self.testuser['password'] = 'new_pass' <NEW_LINE> self.send_request('PUT', 'users', username=self.testuser['username'], password=initial_pass, obj_id=self.testuser['id'], data=self.testuser) <NEW_LINE> self.send_request('GET', 'users', username=self.testuser['username'], password=self.testuser['password'])
Try to refresh the password of the current testuser.
625941b894891a1f4081b8f4
def __init__(self, **kwargs) : <NEW_LINE> <INDENT> self.alphabet = None <NEW_LINE> self.creator_text = release_description <NEW_LINE> self.logo_title = "" <NEW_LINE> self.logo_label = "" <NEW_LINE> self.stacks_per_line = 40 <NEW_LINE> self.unit_name = "bits" <NEW_LINE> self.show_yaxis = True <NEW_LINE> self.yaxis_label = None <NEW_LINE> self.yaxis_tic_interval = 1. <NEW_LINE> self.yaxis_minor_tic_ratio = 5 <NEW_LINE> self.yaxis_scale = None <NEW_LINE> self.show_xaxis = True <NEW_LINE> self.xaxis_label = "" <NEW_LINE> self.xaxis_tic_interval =1 <NEW_LINE> self.rotate_numbers = False <NEW_LINE> self.number_interval = 5 <NEW_LINE> self.show_ends = False <NEW_LINE> self.annotate = None <NEW_LINE> self.show_fineprint = True <NEW_LINE> self.fineprint = "" <NEW_LINE> self.show_boxes = False <NEW_LINE> self.shrink_fraction = 0.5 <NEW_LINE> self.show_errorbars = True <NEW_LINE> self.errorbar_fraction = 0.90 <NEW_LINE> self.errorbar_width_fraction = 0.25 <NEW_LINE> self.errorbar_gray = 0.75 <NEW_LINE> self.resolution = 96. <NEW_LINE> self.default_color = Color.by_name("black") <NEW_LINE> self.color_scheme = None <NEW_LINE> self.debug = False <NEW_LINE> self.logo_margin = 2 <NEW_LINE> self.stroke_width = 0.5 <NEW_LINE> self.tic_length = 5 <NEW_LINE> self.stack_width = std_sizes["medium"] <NEW_LINE> self.stack_aspect_ratio = 5 <NEW_LINE> self.stack_margin = 0.5 <NEW_LINE> self.pad_right = False <NEW_LINE> self.small_fontsize = 6 <NEW_LINE> self.fontsize = 10 <NEW_LINE> self.title_fontsize = 12 <NEW_LINE> self.number_fontsize = 8 <NEW_LINE> self.text_font = "ArialMT" <NEW_LINE> self.logo_font = "Arial-BoldMT" <NEW_LINE> self.title_font = "ArialMT" <NEW_LINE> self.first_index = 1 <NEW_LINE> self.logo_start = None <NEW_LINE> self.logo_end=None <NEW_LINE> self.scale_width = True <NEW_LINE> self.reverse_stacks = True <NEW_LINE> from corebio.utils import update <NEW_LINE> update(self, **kwargs)
Create a new LogoOptions instance. >>> L = LogoOptions(logo_title = "Some Title String") >>> L.show_yaxis = False >>> repr(L)
625941b897e22403b379cde5
def main(): <NEW_LINE> <INDENT> os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'orgbranchs.settings') <NEW_LINE> try: <NEW_LINE> <INDENT> from django.core.management import execute_from_command_line <NEW_LINE> <DEDENT> except ImportError as exc: <NEW_LINE> <INDENT> raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc <NEW_LINE> <DEDENT> execute_from_command_line(sys.argv)
Run administrative tasks.
625941b8656771135c3eb6bf
def create_thumbnail(fname, size=(100, 100), aspect_ratio=False): <NEW_LINE> <INDENT> im = Image.open(fname) <NEW_LINE> width, height = im.size <NEW_LINE> if width > height: <NEW_LINE> <INDENT> delta = width - height <NEW_LINE> left = int(delta / 2) <NEW_LINE> upper = 0 <NEW_LINE> right = height + left <NEW_LINE> lower = height <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> delta = height - width <NEW_LINE> left = 0 <NEW_LINE> upper = int(delta / 2) <NEW_LINE> right = width <NEW_LINE> lower = width + upper <NEW_LINE> <DEDENT> if not aspect_ratio: <NEW_LINE> <INDENT> im = im.crop((left, upper, right, lower)) <NEW_LINE> <DEDENT> im.thumbnail(size, Image.ANTIALIAS) <NEW_LINE> return im
@type fname: C{string} @param: Full path to image file @type size: C{tuple} @param: Width and height of the thumbnail @rtype: C{Image} @return: Returns PIL Image object
625941b8fb3f5b602dac34db
def test_query_sum(self): <NEW_LINE> <INDENT> start_date = datetime(2015, 1, 1) <NEW_LINE> end_date = datetime(2016, 1, 1) <NEW_LINE> result = data_query.query_sum(self.db, ["tot_1"], start_date, end_date, 1) <NEW_LINE> self.assertEqual(result["total"], 10) <NEW_LINE> result = data_query.query_sum(self.db, ["tot_1"], start_date, end_date, 1) <NEW_LINE> self.assertEqual(result["total"], 10) <NEW_LINE> result = data_query.query_sum(self.db, ["gen_1", "age_1"], start_date, end_date, 1) <NEW_LINE> self.assertEqual(result["total"], 2) <NEW_LINE> result = data_query.query_sum(self.db, ["gen_1", "gen_2"], start_date, end_date, 1) <NEW_LINE> self.assertEqual(result["total"], 0)
Test basic query_sum functionality
625941b823e79379d52ee3b4
def extract_maf_wrapper(target, args): <NEW_LINE> <INDENT> accelerated_genomes = set(args.accelerated_genomes + [args.ref_genome]) <NEW_LINE> outgroup_genomes = set(args.target_genomes) - accelerated_genomes <NEW_LINE> bed_recs = [x.split() for x in open(args.conserved_bed)] <NEW_LINE> result_dir = target.getGlobalTempDir() <NEW_LINE> result_tree = TempFileTree(result_dir) <NEW_LINE> for chunk in grouper(bed_recs, 50): <NEW_LINE> <INDENT> result_path = result_tree.getTempFile() <NEW_LINE> target.addChildTargetFn(extract_and_calculate, args=(args, chunk, accelerated_genomes, outgroup_genomes, result_path)) <NEW_LINE> <DEDENT> target.setFollowOnTargetFn(cat_results, args=(args, result_tree.listFiles()))
Main pipeline wrapper. Calls out to hal2maf once for each region in args.conserved_bed
625941b8d58c6744b4257aad
def write(self, **kwargs): <NEW_LINE> <INDENT> return self.stub.write(**kwargs)
insert a level1 record into database :param kwargs: Parameter dictionary, key items support: level0_id: [str] data_type : [str] prc_params : [str] filename : [str] file_path : [str] prc_status : [int] prc_time : [str] pipeline_id : [str] refs: [dict] :returns: csst_dfs_common.models.Result
625941b82c8b7c6e89b35610
def run_editor_on_exception(root_path=None, usercode_traceback=True, usercode_frame=True): <NEW_LINE> <INDENT> sys.excepthook = _get_debug_except_hook(root_path=root_path, usercode_traceback=usercode_traceback, usercode_frame=usercode_frame)
Run the editor when an unhandled exception (a fatal error) happens. Parameters ---------- root_path : str, optional Defaults to None (the directory of the main script). usercode_traceback : bool, optional Whether or not to show only the part of the traceback (error log) which corresponds to the user code. Otherwise, it will show the complete traceback, including code inside libraries. Defaults to True. usercode_frame : bool, optional Whether or not to start the debug window in the frame corresponding to the user code. This argument is ignored (it is always True) if usercode_traceback is True. Defaults to True. Notes ----- sets sys.excepthook
625941b83539df3088e2e197
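A minimal usage sketch (the failing function is illustrative):

run_editor_on_exception(usercode_traceback=True)

def buggy():
    return 1 / 0

buggy()   # instead of exiting with a plain traceback, the editor opens here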
def test_one_task(): <NEW_LINE> <INDENT> f = Flow(name="test") <NEW_LINE> f.add_task(get_task("x1")) <NEW_LINE> steps = f.generate_local_task_ids(_debug_steps=True) <NEW_LINE> assert count_unique_ids(steps[1]) == 1 <NEW_LINE> assert steps[1] == steps[2] == steps[3] == steps[4] == steps[5]
x1 A single task
625941b84e4d5625662d4229
def allocate_structure(self,device=-1): <NEW_LINE> <INDENT> self.E = nn.Embedding( self.ref_set.lex_vocab.size(),self.embedding_size) <NEW_LINE> self.lstm = nn.LSTM(self.embedding_size, self.rnn_memory,num_layers=1,bidirectional=False) <NEW_LINE> self.W_struct_label = nn.Linear(self.rnn_memory, self.ref_set.struct_vocab.size()) <NEW_LINE> self.W_lex_action = nn.Linear(self.rnn_memory, self.ref_set.lex_action_vocab.size()) <NEW_LINE> self.W_struct_action = nn.Linear(self.rnn_memory, self.ref_set.struct_action_vocab.size()) <NEW_LINE> self.logsoftmax = nn.LogSoftmax(dim=1) <NEW_LINE> vs = self.ref_set.lex_vocab.size() <NEW_LINE> self.adalogsoftmax = nn.AdaptiveLogSoftmaxWithLoss(self.rnn_memory,vs,[ int(vs/15), 2*int(vs/15), 4*int(vs/15), 8*int(vs/15)], div_value=4.0, head_bias=False)
This allocates the model parameters on the machine. Args: action_size (int): the number of action types lex_size (int): the size of the lexicon vocabulary struct_size (int): the size of the non terminal vocabulary rnn_memory (int): the size of the rnn hidden memory embedding_size(int): the embedding size device (int): the device where to store the params (-1 :cpu ; 0,1,2... : GPU identifier)
625941b83346ee7daa2b2bb5
def onConnectionPrompt(prompt, state, logger): <NEW_LINE> <INDENT> prompt = prompt.lower() <NEW_LINE> state.setdefault('triedPassword', 0) <NEW_LINE> state.setdefault('triedKeys', {}) <NEW_LINE> if 'enter passphrase for key' in prompt: <NEW_LINE> <INDENT> key = re.findall( r'key \'(.+)\':\s*$', prompt, flags = re.IGNORECASE | re.MULTILINE ) <NEW_LINE> if key is None or len(key) != 1: key = '???' <NEW_LINE> else: key = key[0] <NEW_LINE> state['triedKeys'].setdefault(key, 0) <NEW_LINE> if state['triedKeys'][key] > 2: <NEW_LINE> <INDENT> logger.error('Connect failed: incorrect passphrase (after 3 attempts)') <NEW_LINE> return None <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> state['triedKeys'][key] += 1 <NEW_LINE> state['triedPassword'] = 0 <NEW_LINE> logger.debug('Trying key \'{}\''.format(key)) <NEW_LINE> return state['passphrase'] <NEW_LINE> <DEDENT> <DEDENT> if 'password:' in prompt: <NEW_LINE> <INDENT> if state['triedPassword'] > 2: <NEW_LINE> <INDENT> logger.error('Connect failed: incorrect password (after 3 attempts)') <NEW_LINE> return None <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> state['triedPassword'] += 1 <NEW_LINE> state['triedKeys'] = {} <NEW_LINE> logger.debug('Trying password') <NEW_LINE> return state['password'] <NEW_LINE> <DEDENT> <DEDENT> return None
:type prompt: str :type state: dict[str, object] :type logger: logging.Logger :rtype: str|None
625941b8090684286d50eb2c
@patchmethod(tf.Tensor, tf.Variable) <NEW_LINE> def padaxis(t, paddings, axis, mode='CONSTANT', name=None): <NEW_LINE> <INDENT> if isinstance(axis, int): <NEW_LINE> <INDENT> axis = (axis,) <NEW_LINE> if len(paddings) == 2: <NEW_LINE> <INDENT> paddings = [paddings] <NEW_LINE> <DEDENT> assert len(axis) == len(paddings) <NEW_LINE> <DEDENT> assert t.ndim >= len(paddings) <NEW_LINE> padallaxis = [(0, 0)] * t.ndim <NEW_LINE> for i, padding in zip(axis, paddings): <NEW_LINE> <INDENT> padallaxis[i] = padding <NEW_LINE> <DEDENT> return tf.pad(t, padallaxis, mode=mode, name=name)
t.pad((1,1), axis=0) # padleft, padright t.pad([(1,1), (1,1)], axis=[0,1]) # padleft, right, top, bottom :param t: :param paddings: :param axis: :param mode: :return:
625941b8dd821e528d63aff7
def test_role_edit_audit_errors(self): <NEW_LINE> <INDENT> admin_session = self.get_session('admin') <NEW_LINE> admin_session.edit('matholymprole', '1', {'room_types': ['Single room'], 'default_room_type': 'Shared room'}, error='Default room type not in permitted ' 'room types')
Test errors from role edit auditor.
625941b823849d37ff7b2ede
def move_const_torque(self, torque, **kwargs): <NEW_LINE> <INDENT> assert self._current_pos_enc is not None, ( "Home the actuator before attempting to move") <NEW_LINE> self.set_max_torque(torque, chain=True) <NEW_LINE> self.torque_mode(chain=True) <NEW_LINE> self.go(**kwargs)
Make the actuator maintain a constant torque output, units match read_torque return value.
625941b87d847024c06be10c
def guest_live_upload(self, guestaddr, file_to_upload, destination, timeout=10): <NEW_LINE> <INDENT> self.guest_execute_command(guestaddr, "mkdir -p " + os.path.dirname(destination), timeout) <NEW_LINE> return oz.ozutil.subprocess_check_output(["scp", "-i", self.sshprivkey, "-F", "/dev/null", "-o", "ServerAliveInterval=30", "-o", "StrictHostKeyChecking=no", "-o", "ConnectTimeout=" + str(timeout), "-o", "UserKnownHostsFile=/dev/null", "-o", "PasswordAuthentication=no", "-o", "IdentitiesOnly yes", file_to_upload, "root@" + guestaddr + ":" + destination], printfn=self.log.debug)
Method to copy a file to the live guest.
625941b8c4546d3d9de7287c
def parse_ini( source: typing.Union[str, Path, typing.TextIO], intent_filter: typing.Optional[typing.Callable[[str], bool]] = None, sentence_transform: typing.Callable[[str], str] = None, file_name: typing.Optional[str] = None, ) -> IntentsType: <NEW_LINE> <INDENT> intent_filter = intent_filter or (lambda x: True) <NEW_LINE> if isinstance(source, str): <NEW_LINE> <INDENT> source = io.StringIO(source) <NEW_LINE> file_name = file_name or "<StringIO>" <NEW_LINE> <DEDENT> elif isinstance(source, Path): <NEW_LINE> <INDENT> source = open(source, "r") <NEW_LINE> file_name = file_name or str(source) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> file_name = file_name or "<TextIO>" <NEW_LINE> <DEDENT> sentences: IntentsType = defaultdict(list) <NEW_LINE> try: <NEW_LINE> <INDENT> config = configparser.ConfigParser( allow_no_value=True, strict=False, delimiters=["="] ) <NEW_LINE> config.optionxform = str <NEW_LINE> config.read_file(source) <NEW_LINE> _LOGGER.debug("Loaded ini file") <NEW_LINE> line_number: int = 1 <NEW_LINE> for sec_name in config.sections(): <NEW_LINE> <INDENT> if not intent_filter(sec_name): <NEW_LINE> <INDENT> _LOGGER.debug("Skipping %s", sec_name) <NEW_LINE> continue <NEW_LINE> <DEDENT> line_number += 1 <NEW_LINE> for k, v in config[sec_name].items(): <NEW_LINE> <INDENT> if v is None: <NEW_LINE> <INDENT> sentence = k.strip() <NEW_LINE> sentence = sentence.replace("\\[", "[") <NEW_LINE> if sentence_transform: <NEW_LINE> <INDENT> sentence = sentence_transform(sentence) <NEW_LINE> <DEDENT> sentences[sec_name].append( Sentence.parse( sentence, metadata=ParseMetadata( file_name=file_name, line_number=line_number, intent_name=sec_name, ), ) ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> sentence = v.strip() <NEW_LINE> if sentence_transform: <NEW_LINE> <INDENT> sentence = sentence_transform(sentence) <NEW_LINE> <DEDENT> rule = f"<{k.strip()}> = ({sentence});" <NEW_LINE> rule = rule.replace("\\[", "[") <NEW_LINE> sentences[sec_name].append( Rule.parse( rule, metadata=ParseMetadata( file_name=file_name, line_number=line_number, intent_name=sec_name, ), ) ) <NEW_LINE> <DEDENT> line_number += 1 <NEW_LINE> <DEDENT> line_number += 1 <NEW_LINE> <DEDENT> <DEDENT> finally: <NEW_LINE> <INDENT> source.close() <NEW_LINE> <DEDENT> return sentences
Parse multiple JSGF grammars from an ini file.
625941b8287bf620b61d38bb
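A usage sketch with an inline ini source (the intent name and sentences are made up):

ini_text = """
[GetTime]
what time is it
tell me the time
"""

intents = parse_ini(ini_text)
# -> defaultdict mapping 'GetTime' to two parsed Sentence objects

A line of the form name = (body) inside a section is parsed as a Rule rather than a Sentence, per the else branch above.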
def kraken_results_df_creator(kraken_hits, rds_or_cntgs): <NEW_LINE> <INDENT> dict_hits = {} <NEW_LINE> k = 1 <NEW_LINE> for i in range(0, len(kraken_hits)): <NEW_LINE> <INDENT> dict_hits['sp_krkn'+str(k)+'_'+rds_or_cntgs] = kraken_hits[i][5].lstrip() <NEW_LINE> dict_hits['sp_krkn'+str(k)+'_'+rds_or_cntgs+'_pc'] = kraken_hits[i][0] <NEW_LINE> k += 1 <NEW_LINE> <DEDENT> return dict_hits
Take the 2D array from a kraken search and return result as a dictionary.
625941b876e4537e8c3514c4
def likelihood(data, theta): <NEW_LINE> <INDENT> mu = theta['mu'] <NEW_LINE> tau = theta['tau'] <NEW_LINE> p = theta['p'] <NEW_LINE> lam = theta['lam'] <NEW_LINE> n_cts = len(mu) <NEW_LINE> n_bin = len(p) <NEW_LINE> n_ord = len(lam) <NEW_LINE> product = 1 <NEW_LINE> if isinstance(data, pd.DataFrame): <NEW_LINE> <INDENT> for i in range(len(data)): <NEW_LINE> <INDENT> for G in range(n_cts): <NEW_LINE> <INDENT> product *= normal_pdf(data.iloc[i, G], mu[G], tau[G]) <NEW_LINE> <DEDENT> for B in range(n_bin): <NEW_LINE> <INDENT> product *= bernoulli_pmf(data.iloc[i, n_cts+B], p[B]) <NEW_LINE> <DEDENT> for P in range(n_ord): <NEW_LINE> <INDENT> product *= poisson_pmf(data.iloc[i, n_cts+n_bin+P], lam[P]) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> for G in range(n_cts): <NEW_LINE> <INDENT> product *= normal_pdf(data[G], mu[G], tau[G]) <NEW_LINE> <DEDENT> for B in range(n_bin): <NEW_LINE> <INDENT> product *= bernoulli_pmf(data[n_cts+B], p[B]) <NEW_LINE> <DEDENT> for P in range(n_ord): <NEW_LINE> <INDENT> product *= poisson_pmf(data[n_cts+n_bin+P], lam[P]) <NEW_LINE> <DEDENT> <DEDENT> return product
Calculates likelihood :param data: the data :type data: pd.DataFrame or pd.Series :param parameters theta: parameters of the distributions :return: P(data|theta) :rtype: 0<=float<=1
625941b832920d7e50b28018
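The product being accumulated is an independence-across-columns likelihood; for one row x it is

\[ P(x \mid \theta) = \prod_{G=1}^{n_{\text{cts}}} \mathcal{N}(x_G;\, \mu_G, \tau_G) \prod_{B=1}^{n_{\text{bin}}} \mathrm{Bern}(x_{n_{\text{cts}}+B};\, p_B) \prod_{P=1}^{n_{\text{ord}}} \mathrm{Pois}(x_{n_{\text{cts}}+n_{\text{bin}}+P};\, \lambda_P) \]

and for a DataFrame the per-row likelihoods are multiplied together into a single joint probability.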
def __init__(self, flask_app, url, server_token): <NEW_LINE> <INDENT> self.server_token = server_token <NEW_LINE> self.error_reply = nothing_reply <NEW_LINE> self.text_match_registry = {} <NEW_LINE> self.text_re_match_registry = [] <NEW_LINE> self.msg_registry = {} <NEW_LINE> self.event_registry = {} <NEW_LINE> flask_app.route(url, methods=['GET', 'POST'])(self._entry)
A wechat server. :param flask_app: :param url: register url to handle. :param server_token: wechat's server token. :return:
625941b8e8904600ed9f1d75
def __train_tree(self): <NEW_LINE> <INDENT> train_on = 15 <NEW_LINE> if len(self.history) > train_on: <NEW_LINE> <INDENT> f = -train_on <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> f = 0 <NEW_LINE> <DEDENT> for cont in self.history[f:]: <NEW_LINE> <INDENT> if not cont.result: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> lab = ACTIONHOT[cont.action] <NEW_LINE> self.short_mem_board.append(cont.board) <NEW_LINE> self.short_mem_labels.append(lab) <NEW_LINE> <DEDENT> if len(self.short_mem_board) > self.short_mem_size: <NEW_LINE> <INDENT> boards = self.short_mem_board[:-self.short_mem_size] <NEW_LINE> labels = self.short_mem_labels[:-self.short_mem_size] <NEW_LINE> self.short_mem_board = self.short_mem_board[-self.short_mem_size:] <NEW_LINE> self.short_mem_labels = self.short_mem_labels[-self.short_mem_size:] <NEW_LINE> size = int(len(boards) / 4) <NEW_LINE> for _ in range(size): <NEW_LINE> <INDENT> index = np.random.randint(len(boards), size=1)[0] <NEW_LINE> self.long_mem_board.append(boards.pop(index)) <NEW_LINE> self.long_mem_labels.append(labels.pop(index)) <NEW_LINE> <DEDENT> <DEDENT> boards = self.short_mem_board.copy() <NEW_LINE> boards.extend(self.long_mem_board) <NEW_LINE> labels = self.short_mem_labels.copy() <NEW_LINE> labels.extend(self.long_mem_labels) <NEW_LINE> clf_new = _new_Tree() <NEW_LINE> clf_new = clf_new.fit(boards, labels) <NEW_LINE> self.current += 1 <NEW_LINE> clf_old_count = 0 <NEW_LINE> clf_new_count = 0 <NEW_LINE> for test, lab in zip(boards, labels): <NEW_LINE> <INDENT> pred_old = self.clf.predict_proba([test])[0] <NEW_LINE> pred_new = clf_new.predict_proba([test])[0] <NEW_LINE> if pred_old[lab] > 0.34: <NEW_LINE> <INDENT> clf_old_count += 1 <NEW_LINE> <DEDENT> if pred_new[lab] > 0.34: <NEW_LINE> <INDENT> clf_new_count += 1 <NEW_LINE> <DEDENT> <DEDENT> print(f"Version {self.active} -> {clf_old_count} vs {clf_new_count} <- Version {self.current}") <NEW_LINE> if clf_new_count > clf_old_count: <NEW_LINE> <INDENT> self.active = self.current <NEW_LINE> self.clf = clf_new
L.__train_tree() -> None -- update short & long memory and build new tree and test old and new
625941b838b623060ff0ac3b
def sendGetMessage(url, cookiename, callback): <NEW_LINE> <INDENT> request = {"action": "getCookie", "url": url, "cookieName": cookiename} <NEW_LINE> chrome.runtime.sendMessage(request, callback)
broadcast a cookie get request.
625941b8f9cc0f698b140452
def test_no_displacement(self): <NEW_LINE> <INDENT> self.logTestName() <NEW_LINE> mag_alpha = 0. <NEW_LINE> for phase_alpha in phase_alphas: <NEW_LINE> <INDENT> alpha = mag_alpha * np.exp(1j * phase_alpha) <NEW_LINE> self.circuit.reset(pure=self.kwargs['pure']) <NEW_LINE> self.circuit.displacement(alpha, 0) <NEW_LINE> self.assertAllTrue(self.circuit.is_vacuum(self.tol))
Tests displacement operation in some limiting cases where the result should be a vacuum state.
625941b856b00c62f0f144aa
def put(self,key,value,ex=60 * 60 * 24 * 5): <NEW_LINE> <INDENT> self.redis.set(key, value,ex=ex)
ex: the default expiry time is 5 days; if a task has not finished within 5 days, its record is simply discarded
625941b87cff6e4e811177d3
def normalise(self): <NEW_LINE> <INDENT> _, θ = self.polar() <NEW_LINE> return Vector.from_polar(1, θ)
return a normalised unit vector
625941b866673b3332b91ee4
def handle_getperms(bot, ievent): <NEW_LINE> <INDENT> try: name = ievent.args[0] <NEW_LINE> except IndexError: <NEW_LINE> <INDENT> ievent.missing('<name>') <NEW_LINE> return <NEW_LINE> <DEDENT> name = name.lower() <NEW_LINE> if not bot.users.exist(name): <NEW_LINE> <INDENT> ievent.reply("can't find user %s" % name) <NEW_LINE> return <NEW_LINE> <DEDENT> perms = bot.users.getuserperms(name) <NEW_LINE> if perms: ievent.reply("permissions of %s: " % name, perms) <NEW_LINE> else: ievent.reply('%s has no permissions set' % name)
arguments: <name> - get permissions of name.
625941b830bbd722463cbc0f
def draw_network(graph, filename = 'network.pdf', show = False, verbose = 2 ): <NEW_LINE> <INDENT> if verbose >=2: <NEW_LINE> <INDENT> print(".. Saving the network down as an image") <NEW_LINE> <DEDENT> if verbose >=3 : <NEW_LINE> <INDENT> print("... Converting to dot") <NEW_LINE> <DEDENT> dot = to_pydot(graph) <NEW_LINE> dot.set_node_defaults(style="filled", fillcolor="grey") <NEW_LINE> dot.set_edge_defaults(color="blue", arrowhead="vee", weight="0") <NEW_LINE> if verbose >=3 : <NEW_LINE> <INDENT> print("... Writing down") <NEW_LINE> <DEDENT> dot.write_png(filename)
This is a simple wrapper to the networkx_draw. Args: graph: Supply a networkx graph object. NNs are all DiGraphs. filename: what file to save down as. Will add '.png' to the end. verbose: Do I even have to talk about this ? Notes: Takes any format that networkx plotter takes. This is not ready to be used. Still buggy sometimes. Rudra is working on developing this further internally. This is slow at the moment.
625941b8460517430c393fdb
def invokeFactory(self, type_name, id, RESPONSE=None, *args, **kw): <NEW_LINE> <INDENT> constrains = IConstrainTypes(self, None) <NEW_LINE> if constrains: <NEW_LINE> <INDENT> allowed_ids = [ fti.getId() for fti in constrains.allowedContentTypes() ] <NEW_LINE> if type_name not in allowed_ids: <NEW_LINE> <INDENT> raise ValueError( 'Subobject type disallowed by IConstrainTypes adapter: %s' % type_name ) <NEW_LINE> <DEDENT> <DEDENT> return super(Container, self).invokeFactory( type_name, id, RESPONSE, *args, **kw )
Invokes the portal_types tool
625941b80383005118ecf431
def get_all_balances_paged(self, limit=None, page=None): <NEW_LINE> <INDENT> data = {} <NEW_LINE> if limit: <NEW_LINE> <INDENT> data['limit'] = limit <NEW_LINE> <DEDENT> if page: <NEW_LINE> <INDENT> data['page'] = page <NEW_LINE> <DEDENT> return self._get('account/balances', True, data=data)
Get all coin balances with paging if that's what you want https://kucoinapidocs.docs.apiary.io/#reference/0/assets-operation/get-all-balance :param limit: optional - Number of balances default 12, max 20 :type limit: int :param page: optional - Page to fetch :type page: int .. code:: python # get the default response balances = client.get_all_balances() # get a paged response balances = client.get_all_balances(limit=20, page=2) :returns: ApiResponse .. code:: python [ { coinType: "BTC", balance: 1233214, freezeBalance: 321321, balanceStr: "1233214" freezeBalanceStr: "321321" } ] :raises: KucoinResponseException, KucoinAPIException
625941b84a966d76dd550e59
def run(self, force=False): <NEW_LINE> <INDENT> if self.solution['results'] is not None and not force: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> master_results = np.zeros_like(np.arange(self._starts[0], self._stops[0], self._skip), dtype=np.float32) <NEW_LINE> counter = np.zeros_like(master_results, dtype=np.float32) <NEW_LINE> pm = ProgressMeter(self.nruns, interval=1, format="Performing run %(step)5d/%(numsteps)d" "[%(percentage)5.1f%%]\r") <NEW_LINE> for i, (start, stop) in enumerate(zip(self._starts, self._stops)): <NEW_LINE> <INDENT> pm.echo(i + 1) <NEW_LINE> results = self._single_run(int(start), int(stop)) <NEW_LINE> nresults = len(results) <NEW_LINE> if nresults == len(master_results): <NEW_LINE> <INDENT> master_results += results <NEW_LINE> counter += 1.0 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> master_results[:nresults] += results <NEW_LINE> counter[:nresults] += 1.0 <NEW_LINE> <DEDENT> <DEDENT> master_results /= counter <NEW_LINE> self.solution['time'] = np.arange( len(master_results), dtype=np.float32) * self.u.trajectory.dt * self._skip <NEW_LINE> self.solution['results'] = master_results
Run all the required passes Parameters: ----------- force : bool, optional Will overwrite previous results if they exist
625941b89b70327d1c4e0c20
def fix_on_leg(self, fix, leg): <NEW_LINE> <INDENT> larger_than_minimum = not self.fix_before_leg(fix, leg) <NEW_LINE> smaller_than_maximum = not self.fix_after_leg(fix, leg) <NEW_LINE> return larger_than_minimum and smaller_than_maximum
Return whether the fix takes place within a certain leg, excluding the boundaries :param fix: :param leg: :return:
625941b8cc40096d615957a0
def test_topology_normalization(topology_with_dupl_links, normalized_topology_example): <NEW_LINE> <INDENT> top_with_data = task_25_1c.Topology(topology_with_dupl_links) <NEW_LINE> assert len(top_with_data.topology) == len(normalized_topology_example)
Check that duplicate links are removed from the topology
625941b8377c676e91271ff7
def wait(self, expect=0): <NEW_LINE> <INDENT> if expect > 0: <NEW_LINE> <INDENT> raise AssertionError('expect <= 0') <NEW_LINE> <DEDENT> if expect == 0 and len(self.buffer) > 0: <NEW_LINE> <INDENT> return self.pop() <NEW_LINE> <DEDENT> while True: <NEW_LINE> <INDENT> s = self.ch_in.readline() <NEW_LINE> self.logger.info("read: %s", s) <NEW_LINE> ind, obj = json.loads(s) <NEW_LINE> if expect < 0 and ind > 0: <NEW_LINE> <INDENT> self.buffer.insert(0, obj) <NEW_LINE> <DEDENT> elif (expect == 0 and ind < 0) or (expect < 0 and expect != ind): <NEW_LINE> <INDENT> raise ValueError('Incorrect index received! {} != {}'.format(expect, ind)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> <DEDENT> return obj
Blocking function. Use with care! `expect` should either be 0 or a negative number. If `expect == 0`, any positive indexed object is returned. Otherwise, it will queue any positive objects until the first negative object is received. If the received negative object does not match `expect`, then a ValueError is raised.
625941b838b623060ff0ac3c
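A sketch of the line protocol this implies; the framing is inferred from the json.loads call and the payloads are illustrative:

# each line read from ch_in is a JSON array: [index, payload]
# '[5, {"event": "tick"}]'    index > 0: ordinary message; satisfies wait(expect=0)
# '[-2, {"result": 42}]'      index < 0: a reply; only wait(expect=-2) accepts it,
#                             any other negative index raises ValueError, and positive
#                             messages seen while waiting are buffered for a later pop()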
def __init__(self, name=None, value=None): <NEW_LINE> <INDENT> self._name = None <NEW_LINE> self._value = None <NEW_LINE> self.discriminator = None <NEW_LINE> if name is not None: <NEW_LINE> <INDENT> self.name = name <NEW_LINE> <DEDENT> if value is not None: <NEW_LINE> <INDENT> self.value = value
ScreenRecordingFilterPageViewReferrerParam - a model defined in Swagger
625941b8099cdd3c635f0aa9
def add_nzb(self, filename, content=None, category=None, priority=PRIORITY.NORMAL): <NEW_LINE> <INDENT> if not self.api_connect(): <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> add_to_top = False <NEW_LINE> add_paused = False <NEW_LINE> dup_key = '' <NEW_LINE> dup_score = 0 <NEW_LINE> dup_mode = NZBGetDuplicateMode.FORCE <NEW_LINE> if content is None: <NEW_LINE> <INDENT> if not category: <NEW_LINE> <INDENT> meta = self.parse_nzbfile(filename) <NEW_LINE> category = unescape_xml(meta.get('CATEGORY', '').strip()) <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> f = open(filename, "r") <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> self.logger.debug('API:NZB-File Could not open: %s' % filename) <NEW_LINE> return False <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> content = f.read() <NEW_LINE> f.close() <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> self.logger.debug('API:NZB-File Could not read: %s' % filename) <NEW_LINE> f.close() <NEW_LINE> return False <NEW_LINE> <DEDENT> <DEDENT> elif not category: <NEW_LINE> <INDENT> meta = self.parse_nzbcontent(content) <NEW_LINE> category = unescape_xml(meta.get('CATEGORY', '').strip()) <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> b64content = standard_b64encode(content) <NEW_LINE> <DEDENT> except TypeError: <NEW_LINE> <INDENT> b64content = standard_b64encode( content.encode('utf-8')).decode('utf-8') <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> return self.api.append( filename, b64content, category, priority, add_to_top, add_paused, dup_key, dup_score, dup_mode, ) <NEW_LINE> <DEDENT> except ConnectionRefusedError: <NEW_LINE> <INDENT> self.logger.error( 'API:NZB-File could not establish a ' 'connection to %s.' % str(self._xmlrpc_url)) <NEW_LINE> return False <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> exc_type, exc_value, exc_traceback = exc_info() <NEW_LINE> lines = traceback.format_exception( exc_type, exc_value, exc_traceback) <NEW_LINE> if self.script_mode != SCRIPT_MODE.NONE: <NEW_LINE> <INDENT> for line in lines: <NEW_LINE> <INDENT> self.logger.error(line) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> self.logger.error( 'API:NZB-File append() Exception:\n%s' % ''.join(' ' + line for line in lines)) <NEW_LINE> <DEDENT> return False <NEW_LINE> <DEDENT> return True
Simply adds an NZB file to NZBGet (via the API)
625941b857b8e32f524832ed
def leafSimilar(self, root1, root2): <NEW_LINE> <INDENT> def dfs(node, lst): <NEW_LINE> <INDENT> if not node.left and not node.right: <NEW_LINE> <INDENT> lst.append(node.val) <NEW_LINE> return <NEW_LINE> <DEDENT> if node.left: <NEW_LINE> <INDENT> dfs(node.left, lst) <NEW_LINE> <DEDENT> if node.right: <NEW_LINE> <INDENT> dfs(node.right, lst) <NEW_LINE> <DEDENT> <DEDENT> list_1 = [] <NEW_LINE> dfs(root1, list_1) <NEW_LINE> list_2 = [] <NEW_LINE> dfs(root2, list_2) <NEW_LINE> if len(list_1) != len(list_2): <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> for i, j in zip(list_1, list_2): <NEW_LINE> <INDENT> if i != j: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> <DEDENT> return True
:type root1: TreeNode :type root2: TreeNode :rtype: bool
625941b8d99f1b3c44c673e4
def check_for_password_before_cmdlist_func_call(*args, **kwargs): <NEW_LINE> <INDENT> util.log_info("... cmdlist_func = %s %s" % (archive_cmdlist_func, '')) <NEW_LINE> util.log_info("... kwargs=%s args=%s" % (kwargs, args)) <NEW_LINE> if 'password' in kwargs and kwargs['password'] is None: <NEW_LINE> <INDENT> kwargs.pop('password') <NEW_LINE> <DEDENT> if 'password' not in kwargs: <NEW_LINE> <INDENT> return archive_cmdlist_func(*args, **kwargs) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if 'password' in inspect.signature(archive_cmdlist_func).parameters: <NEW_LINE> <INDENT> return archive_cmdlist_func(*args, **kwargs) <NEW_LINE> <DEDENT> raise util.PatoolError('There is no support for password in %s' % program)
If password is None, or not set, run the command as usual. If password is set but can't be accepted, raise an appropriate error.
625941b8fbf16365ca6f6009
@pytest.fixture(scope="module") <NEW_LINE> def inertialbase_objects_rt(dcm, mem): <NEW_LINE> <INDENT> dico = {} <NEW_LINE> coord = ["X", "Y", "Z"] <NEW_LINE> for each in coord: <NEW_LINE> <INDENT> dico["Acc" + each] = InertialSensorBase( dcm, mem, "Accelerometer" + each) <NEW_LINE> dico["Angle" + each] = InertialSensorBase( dcm, mem, "Angle" + each) <NEW_LINE> dico["Gyr" + each] = InertialSensorBase( dcm, mem, "Gyr" + each) <NEW_LINE> <DEDENT> return dico
Return a dictionary with an InertialSensorBase object for each sensor value of the inertial base
625941b810dbd63aa1bd29fc
def create_deck(self, user_id, deck_name): <NEW_LINE> <INDENT> url = f'http://127.0.0.1:5000//Server/create_deck/{user_id}' <NEW_LINE> data = {deck_name: ''} <NEW_LINE> response = requests.post(url, json=data) <NEW_LINE> print(response.json())
Create a deck
625941b8d7e4931a7ee9dd68
@deprecated( "2016-11-30", "Please switch to tf.summary.image. Note that " "tf.summary.image uses the node name instead of the tag. " "This means that TensorFlow will automatically de-duplicate summary " "names based on the scope they are created in. Also, the max_images " "argument was renamed to max_outputs.") <NEW_LINE> def image_summary(tag, tensor, max_images=3, collections=None, name=None): <NEW_LINE> <INDENT> with ops.name_scope(name, "ImageSummary", [tag, tensor]) as scope: <NEW_LINE> <INDENT> val = gen_logging_ops._image_summary( tag=tag, tensor=tensor, max_images=max_images, name=scope) <NEW_LINE> _Collect(val, collections, [ops.GraphKeys.SUMMARIES]) <NEW_LINE> <DEDENT> return val
Outputs a `Summary` protocol buffer with images. For an explanation of why this op was deprecated, and information on how to migrate, look ['here'](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/deprecated/__init__.py) The summary has up to `max_images` summary values containing images. The images are built from `tensor` which must be 4-D with shape `[batch_size, height, width, channels]` and where `channels` can be: * 1: `tensor` is interpreted as Grayscale. * 3: `tensor` is interpreted as RGB. * 4: `tensor` is interpreted as RGBA. The images have the same number of channels as the input tensor. For float input, the values are normalized one image at a time to fit in the range `[0, 255]`. `uint8` values are unchanged. The op uses two different normalization algorithms: * If the input values are all positive, they are rescaled so the largest one is 255. * If any input value is negative, the values are shifted so input value 0.0 is at 127. They are then rescaled so that either the smallest value is 0, or the largest one is 255. The `tag` argument is a scalar `Tensor` of type `string`. It is used to build the `tag` of the summary values: * If `max_images` is 1, the summary value tag is '*tag*/image'. * If `max_images` is greater than 1, the summary value tags are generated sequentially as '*tag*/image/0', '*tag*/image/1', etc. Args: tag: A scalar `Tensor` of type `string`. Used to build the `tag` of the summary values. tensor: A 4-D `uint8` or `float32` `Tensor` of shape `[batch_size, height, width, channels]` where `channels` is 1, 3, or 4. max_images: Max number of batch elements to generate images for. collections: Optional list of ops.GraphKeys. The collections to add the summary to. Defaults to [ops.GraphKeys.SUMMARIES] name: A name for the operation (optional). Returns: A scalar `Tensor` of type `string`. The serialized `Summary` protocol buffer.
625941b8090684286d50eb2d
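A minimal usage sketch for the summary op described above, assuming a TF 1.x graph-mode installation (log directory and tensor values are made up); per the deprecation note, tf.summary.image with max_outputs is used in place of the deprecated image_summary:

import numpy as np
import tensorflow as tf  # assumes a TF 1.x installation (graph mode, tf.Session)

# 4-D [batch_size, height, width, channels]; one channel -> grayscale
images = tf.constant(np.random.rand(2, 8, 8, 1), dtype=tf.float32)
summary_op = tf.summary.image('sample_images', images, max_outputs=2)

with tf.Session() as sess:
    writer = tf.summary.FileWriter('/tmp/logs', sess.graph)
    writer.add_summary(sess.run(summary_op))
    writer.close()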
def show_conv_to_rve(self): <NEW_LINE> <INDENT> self.convert_to_rve_widget = ConvertSegForRVEWidget() <NEW_LINE> self.convert_to_rve_widget.show()
Show a window to convert segmentations to a RhizoVision Explorer-compatible format
625941b850485f2cf553cbe6
def pthread_self(): <NEW_LINE> <INDENT> f = gdb.newest_frame() <NEW_LINE> while f.name() != 'start_thread': <NEW_LINE> <INDENT> f = f.older() <NEW_LINE> if f is None: <NEW_LINE> <INDENT> return get_fs_base() <NEW_LINE> <DEDENT> <DEDENT> try: <NEW_LINE> <INDENT> return f.read_var("arg") <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> return get_fs_base()
Fetch pthread_self() from the glibc start_thread function.
625941b8ab23a570cc24ffcd
def GetTitleString(self): <NEW_LINE> <INDENT> fname = self.GetFileName() <NEW_LINE> title = os.path.split(fname)[-1] <NEW_LINE> if not len(title): <NEW_LINE> <INDENT> title = fname = self.GetTabLabel() <NEW_LINE> <DEDENT> if self.GetModify() and not title.startswith(u'*'): <NEW_LINE> <INDENT> title = u"*" + title <NEW_LINE> <DEDENT> return u"%s - file://%s" % (title, fname)
Get the title string to display in the MainWindow's title bar @return: (unicode) string
625941b8fff4ab517eb2f286
def commit_counts(self, session): <NEW_LINE> <INDENT> limit = 1 <NEW_LINE> if self.cpt >= limit: <NEW_LINE> <INDENT> session.commit() <NEW_LINE> self.cpt = 0
Commits data to the db once enough data has accumulated. FIXME: by not committing every time, we might get duplicates if the same user tweets several times with the same flag
625941b8498bea3a759b98fe
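The idea in the entry above — commit only once a counter reaches a threshold rather than on every write — in a standalone sketch. The threshold of 1 in the source effectively commits every time, so the sketch uses a larger placeholder value and increments the counter locally for brevity:

class BatchCommitter:
    """Commit a session only once `limit` pending updates have accumulated."""

    def __init__(self, limit=100):  # placeholder threshold; the source uses 1
        self.cpt = 0
        self.limit = limit

    def commit_counts(self, session):
        self.cpt += 1
        if self.cpt >= self.limit:
            session.commit()  # flush the whole batch at once
            self.cpt = 0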
def get_version(request): <NEW_LINE> <INDENT> if 'jsonrpc' in request: <NEW_LINE> <INDENT> return 2.0 <NEW_LINE> <DEDENT> elif 'id' in request: <NEW_LINE> <INDENT> return 1.0 <NEW_LINE> <DEDENT> return None
Computes the JSON-RPC version :param request: A request dictionary :return: The JSON-RPC version or None
625941b85e10d32532c5ed7c
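The detection rule above in action: a 'jsonrpc' member marks a 2.0 request, an 'id' member alone marks 1.0, and anything else is unrecognized:

assert get_version({'jsonrpc': '2.0', 'method': 'ping', 'id': 1}) == 2.0
assert get_version({'method': 'ping', 'id': 1}) == 1.0
assert get_version({'method': 'ping'}) is None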
def __iter__(self): <NEW_LINE> <INDENT> return iter(self._item)
Return an iterator when used in an iteration context
625941b86fece00bbac2d588
def get_default_params(type='BSPLINE'): <NEW_LINE> <INDENT> p = Parameters() <NEW_LINE> type = type.upper() <NEW_LINE> p.Metric = 'AdvancedMattesMutualInformation' <NEW_LINE> p.NumberOfHistogramBins = 32 <NEW_LINE> p.ImageSampler = 'RandomCoordinate' <NEW_LINE> p.NumberOfSpatialSamples = 2048 <NEW_LINE> p.NewSamplesEveryIteration = True <NEW_LINE> p.NumberOfResolutions = 4 <NEW_LINE> if type in ['B', 'BSPLINE', 'B-SPLINE']: <NEW_LINE> <INDENT> p.Transform = 'BSplineTransform' <NEW_LINE> p.FinalGridSpacingInPhysicalUnits = 16 <NEW_LINE> <DEDENT> if type in ['RIGID', 'EULER', 'AFFINE']: <NEW_LINE> <INDENT> if type in ['RIGID', 'EULER']: <NEW_LINE> <INDENT> p.Transform = 'EulerTransform' <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> p.Transform = 'AffineTransform' <NEW_LINE> <DEDENT> p.AutomaticScalesEstimation = True <NEW_LINE> p.AutomaticTransformInitialization = True <NEW_LINE> <DEDENT> p.Optimizer = 'AdaptiveStochasticGradientDescent' <NEW_LINE> p.MaximumNumberOfIterations = 500 <NEW_LINE> return p
get_default_params(type='BSPLINE') Get `Parameters` struct with parameters that users may want to tweak. The given `type` specifies the type of allowed transform, and can be 'RIGID', 'AFFINE', 'BSPLINE'. For detail on what parameters are available and how they should be used, we refer to the Elastix documentation. Here is a description of the most common parameters: * Transform (str): Can be 'BSplineTransform', 'EulerTransform', or 'AffineTransform'. The transformation to apply. Chosen based on `type`. * FinalGridSpacingInPhysicalUnits (int): When using the BSplineTransform, the final spacing of the grid. This controls the smoothness of the final deformation. * AutomaticScalesEstimation (bool): When using a rigid or affine transform. Scales the affine matrix elements compared to the translations, to make sure they are in the same range. In general, it's best to use automatic scales estimation. * AutomaticTransformInitialization (bool): When using a rigid or affine transform. Automatically guess an initial translation by aligning the geometric centers of the fixed and moving. * NumberOfResolutions (int): Most registration algorithms adopt a multiresolution approach to direct the solution towards a global optimum and to speed up the process. This parameter specifies the number of scales to apply the registration at. (default 4) * MaximumNumberOfIterations (int): Maximum number of iterations in each resolution level. 200-2000 works usually fine for nonrigid registration. The more, the better, but the longer computation time. This is an important parameter! (default 500).
625941b885dfad0860c3aca6
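A usage sketch for the helper above, assuming it comes from a pyelastix-style registration API; the register call and image variables are placeholders, not confirmed by this entry:

params = get_default_params(type='BSPLINE')
params.FinalGridSpacingInPhysicalUnits = 8  # finer grid -> more flexible deformation
params.MaximumNumberOfIterations = 1000     # more iterations, longer runtime

# hypothetical registration call; the exact entry point depends on the host library
# deformed, field = register(moving_image, fixed_image, params)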
def matches(self, jira_connection: 'JiraConnection', find: str) -> bool: <NEW_LINE> <INDENT> if self.jira_connection_name == jira_connection.connection_name: <NEW_LINE> <INDENT> if find in self.issue_key: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> for value in list(self.values()): <NEW_LINE> <INDENT> if find in value: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return False
Tests all values in the ticket to see if they match the input string. Requires input of the JiraConnection you want to compare against to protect against duplicate project names across different JIRA instances.
625941b8b545ff76a8913c6c
def die(msg, *args): <NEW_LINE> <INDENT> error(msg, *args) <NEW_LINE> sys.exit(1)
Print an error message to stderr and exit the program.
625941b831939e2706e4ccbd
def delete(self, item_id, comment_id): <NEW_LINE> <INDENT> auth, retval = __check_auth__(self.auth_dict) <NEW_LINE> if auth: <NEW_LINE> <INDENT> return retval <NEW_LINE> <DEDENT> query = ItemComment.query.filter(ItemComment.id == comment_id) <NEW_LINE> query = query.filter(ItemComment.user_id == current_user.id).delete() <NEW_LINE> db.session.commit() <NEW_LINE> return {'result': 'success'}, 202
.. http:delete:: /api/1/items/<int:item_id>/comment/<int:comment_id> Deletes an item comment. **Example Request**: .. sourcecode:: http DELETE /api/1/items/1234/comment/7718 HTTP/1.1 Host: example.com Accept: application/json { } **Example Response**: .. sourcecode:: http HTTP/1.1 202 Accepted Vary: Accept Content-Type: application/json { 'result': 'success' } :statuscode 202: Deleted :statuscode 401: Authentication Error. Please Login.
625941b826068e7796caeb25
def _after_init(self, _): <NEW_LINE> <INDENT> username = password = None <NEW_LINE> if creds is not None: <NEW_LINE> <INDENT> username, password = creds.username, creds.password <NEW_LINE> <DEDENT> if username is None: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> if username is not None: <NEW_LINE> <INDENT> callback = partial(self._after_password, username, password) <NEW_LINE> Clock.schedule_once(callback) <NEW_LINE> return <NEW_LINE> <DEDENT> pw = PasswordPopup(self) <NEW_LINE> pw.open()
Get password
625941b8dc8b845886cb5381
def borrow_book(self, book, patron): <NEW_LINE> <INDENT> patron.add_borrowed_book(book.lower()) <NEW_LINE> self.db.update_patron(patron)
Borrows a book for a Patron. :param book: the title of the book :param patron: the Patron object
625941b88c0ade5d55d3e80c
def OnInit(self): <NEW_LINE> <INDENT> self.SetAppName('CEBL') <NEW_LINE> main = CEBLMain() <NEW_LINE> return True
Create a new CEBLMain frame.
625941b8d164cc6175782b9b
def test_nets(self): <NEW_LINE> <INDENT> good_nets = self.good.nets[:] <NEW_LINE> self.assertEqual(len(good_nets), 5) <NEW_LINE> for net in self.actual.nets: <NEW_LINE> <INDENT> for goodnet in good_nets: <NEW_LINE> <INDENT> if set(net.points) == set(goodnet.points): <NEW_LINE> <INDENT> good_nets.remove(goodnet) <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> raise Exception('bad net', net) <NEW_LINE> <DEDENT> <DEDENT> self.assertEqual(good_nets, [])
Test that all the right nets are present with the right points.
625941b81d351010ab85596b
def get_stdev(nums): <NEW_LINE> <INDENT> mean = sum(nums) / len(nums) <NEW_LINE> # population standard deviation (divide by n); a sample stdev would divide by len(nums) - 1 <NEW_LINE> return (sum((x - mean) ** 2 for x in nums) / len(nums)) ** 0.5
Helper function for calculating the standard deviation of a list of numbers. :param nums: list of numbers :return: standard deviation of list
625941b8d268445f265b4cc2
def schedule_teleport(self, position, map_id=None, *map_args): <NEW_LINE> <INDENT> if map_id is not None: <NEW_LINE> <INDENT> self.world.schedule_teleport(position, map_id, *map_args) <NEW_LINE> self.controller.stop() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.teleport_object(self.party_avatar, position)
After the current iteration of the WorldMap's context stack, the party will be teleported to the WorldMap represented by *map_id* at *position*. This method may also be used to teleport to another place in the same map, by passing None. If the map id of the current map is passed, the party will be removed and added to the map, causing the state to be saved and the map to be reinitialized as if it were just entered. If the target map takes arguments for creation, pass them as *map_args*.
625941b8796e427e537b040f
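A runnable stand-in illustrating the contract described above — same-map teleports happen directly, cross-map teleports are deferred until the current iteration ends (Position and the map id are placeholders, not part of the source API):

from collections import namedtuple

Position = namedtuple('Position', 'x y')  # placeholder position type

class DemoWorldMap:
    def schedule_teleport(self, position, map_id=None):
        if map_id is not None:
            print('after this iteration: move party to map', map_id, 'at', position)
        else:
            print('teleport party within the current map to', position)

m = DemoWorldMap()
m.schedule_teleport(Position(10, 4))           # same map, immediate
m.schedule_teleport(Position(0, 0), map_id=2)  # deferred, map change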
def identify(self, authority, **kwds): <NEW_LINE> <INDENT> return authority.onDescriptor(descriptor=self, **kwds)
Let {authority} know I am a descriptor
625941b80c0af96317bb8036
def show_softclip(self): <NEW_LINE> <INDENT> bsoftclip = self.gamma.bsoftclip <NEW_LINE> if isinstance(bsoftclip, dict): <NEW_LINE> <INDENT> paramstr = ', '.join('{!s}={!r}'.format(key, val) for (key, val) in bsoftclip.items()) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> paramstr = bsoftclip <NEW_LINE> <DEDENT> return 'Set soft clip start: {} ({})'.format(self.gamma.get_effective_bsoftclip(), paramstr)
Return softclip start value(s) to show in menu
625941b829b78933be1e5506
def get_auto_login_url(self, url, name, token, login_type): <NEW_LINE> <INDENT> login_type = constants.ClassRoomAutoLoginType(login_type) <NEW_LINE> if login_type in ( constants.ClassRoomAutoLoginType.record, constants.ClassRoomAutoLoginType.audience, ): <NEW_LINE> <INDENT> return '{}&autoLogin=true&viewername={}&viewertoken={}'.format(url, name, token) <NEW_LINE> <DEDENT> return '{}&autoLogin=true&username={}&password={}'.format(url, name, token)
url is one of the URLs obtained from room_link. Presenter (teacher) side: https://class.csslcloud.net/index/presenter/?roomid=FC3548C1133061D09C33DC5901307461&userid=E9607DAFB705A798&username=XXX&password=XXX&autoLogin=true Interactive participant (talker): https://class.csslcloud.net/index/talker/?roomid=FC3548C1133061D09C33DC5901307461&userid=E9607DAFB705A798&username=XXX&password=XXX&autoLogin=true Audience (viewer) side: http://view.csslcloud.net/api/view/index?roomid=xxx&userid=xxx&autoLogin=true&viewername=11&viewertoken=11 Playback side: http://view.csslcloud.net/api/view/callback/login?liveid=xxx&roomid=xxx&userid=xxx&autoLogin=true&viewername=11&viewertoken=11 Note: compare carefully against the URL examples above.
625941b894891a1f4081b8f5
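The translated docstring above boils down to two query shapes: viewer-style credentials for audience/playback logins, username/password for everyone else. A self-contained sketch (the enum and sample values stand in for constants.ClassRoomAutoLoginType):

from enum import Enum

class LoginType(Enum):  # stand-in for constants.ClassRoomAutoLoginType
    presenter = 'presenter'
    audience = 'audience'
    record = 'record'

def auto_login_url(url, name, token, login_type):
    if login_type in (LoginType.audience, LoginType.record):
        return '{}&autoLogin=true&viewername={}&viewertoken={}'.format(url, name, token)
    return '{}&autoLogin=true&username={}&password={}'.format(url, name, token)

base = 'https://class.csslcloud.net/index/presenter/?roomid=XXX&userid=YYY'
print(auto_login_url(base, 'alice', 'pw123', LoginType.presenter))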
def getPercCpuLoad(self): <NEW_LINE> <INDENT> return int(self.getCpuLoad() * 100.0)
Returns the CPU load as a percentage, 0 to 100 \param self Pointer to the object \return Percentage between 0 and 100
625941b8de87d2750b85fbdb
def _test_expected_for_job(self, expected_results, job): <NEW_LINE> <INDENT> results = {} <NEW_LINE> with job.make_runner() as runner: <NEW_LINE> <INDENT> runner.run() <NEW_LINE> for line in runner.stream_output(): <NEW_LINE> <INDENT> key, value = job.parse_output_line(line) <NEW_LINE> results[key] = value <NEW_LINE> <DEDENT> <DEDENT> self.assertDictEqual(expected_results, results)
Simple utility function to test that results are as expected
625941b8cb5e8a47e48b78fd
def process_mptcp_pkt_from_server(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport): <NEW_LINE> <INDENT> dss, dack, dss_is_8_bytes = get_dss_and_data_ack(tcp) <NEW_LINE> conn_id = acks[daddr, dport, saddr, sport][co.CONN_ID] <NEW_LINE> flow_id = acks[daddr, dport, saddr, sport][co.FLOW_ID] <NEW_LINE> if conn_acks[conn_id][co.C2S] >= 0: <NEW_LINE> <INDENT> max_val = 2**64 if dss_is_8_bytes else 2**32 <NEW_LINE> bytes_acked = (dack - conn_acks[conn_id][co.C2S]) % max_val <NEW_LINE> if bytes_acked >= 2000000000: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> size_payload = ip.len - ip.hl * 4 - tcp.off * 4 <NEW_LINE> if (size_payload > 0 and dss in conn_acks[conn_id][SEQ_S2C] and (dss - conn_acks[conn_id][co.S2C]) % max_val < 2000000000 and (mptcp_connections[conn_id].attr[co.S2C][co.TIME_LAST_ACK_TCP] - ts_delta).total_seconds() > 0.0): <NEW_LINE> <INDENT> mptcp_connections[conn_id].attr[co.S2C][co.RETRANS_DSS].append((ts_delta, flow_id, dss, conn_acks[conn_id][HSEQ_S2C][dss][2], ts_delta - conn_acks[conn_id][HSEQ_S2C][dss][0], ts_delta - conn_acks[conn_id][HSEQ_S2C][dss][1], ts_delta - conn_acks[conn_id][co.TIMESTAMP][SERVER])) <NEW_LINE> conn_acks[conn_id][HSEQ_S2C][dss][1] = ts_delta <NEW_LINE> <DEDENT> elif size_payload > 0 and dss is not False: <NEW_LINE> <INDENT> conn_acks[conn_id][SEQ_S2C].add(dss) <NEW_LINE> conn_acks[conn_id][HSEQ_S2C][dss] = [ts_delta, ts_delta, ts_delta - conn_acks[conn_id][co.TIMESTAMP][SERVER]] <NEW_LINE> <DEDENT> <DEDENT> conn_acks[conn_id][co.C2S] = dack <NEW_LINE> acks[daddr, dport, saddr, sport][co.TIMESTAMP][SERVER] = ts_delta <NEW_LINE> conn_acks[conn_id][co.TIMESTAMP][SERVER] = ts_delta
Process a packet from the server with the ACK flag set, tracking MPTCP DSS retransmissions
625941b823e79379d52ee3b5
def SetFullyConnected(self, *args): <NEW_LINE> <INDENT> return _itkOpeningByReconstructionImageFilterPython.itkOpeningByReconstructionImageFilterID3ID3SE3_SetFullyConnected(self, *args)
SetFullyConnected(self, bool _arg)
625941b863f4b57ef0000f70
def query_block_height(height, bestblock=False): <NEW_LINE> <INDENT> if height < 0: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> if not bestblock: <NEW_LINE> <INDENT> kwargs = {'method': 'getblockhash', 'params': [height]} <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> kwargs = {'method': 'getbestblockhash'} <NEW_LINE> <DEDENT> btc.wss.send(**kwargs) <NEW_LINE> blockhash = btc.wss.recv() <NEW_LINE> if blockhash['result']: <NEW_LINE> <INDENT> bhash = blockhash['result'] <NEW_LINE> return query_block_hash(bhash)
Return a block by its height. :param bool bestblock: if True, ignore the height param and return the most recent block.
625941b830bbd722463cbc10
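The helper above dispatches to getblockhash(height) or getbestblockhash over an RPC client before resolving the hash to a block. A minimal sketch of the same dispatch against a stock Bitcoin Core JSON-RPC endpoint using requests; the URL, credentials, and the final getblock resolution are assumptions:

import requests

RPC_URL = 'http://user:pass@127.0.0.1:8332'  # placeholder credentials/endpoint

def rpc(method, params=None):
    payload = {'jsonrpc': '1.0', 'id': 0, 'method': method, 'params': params or []}
    return requests.post(RPC_URL, json=payload).json()['result']

def block_by_height(height, bestblock=False):
    if height < 0:
        return None
    blockhash = rpc('getbestblockhash') if bestblock else rpc('getblockhash', [height])
    return rpc('getblock', [blockhash])

# block_by_height(0)  # genesis block (needs a running bitcoind)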
def delDoor(self, ctrllerMac, doorId): <NEW_LINE> <INDENT> doorId = str(doorId).encode('utf8') <NEW_LINE> msg = CUD + b'P' + b'D' + b'{"id": ' + doorId + b'}' + END <NEW_LINE> try: <NEW_LINE> <INDENT> self.netMngr.sendToCtrller(msg, ctrllerMac) <NEW_LINE> <DEDENT> except CtrllerDisconnected: <NEW_LINE> <INDENT> self.logger.warning("Controller disconnected to delete door")
Receives the controller MAC and the door ID, builds the delete message to send to the controller, and hands the message to the network manager thread.
625941b8796e427e537b0410
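The message built above is plain byte concatenation: a command prefix, the 'P'/'D' marker pair, a JSON payload carrying the door id, and a terminator. With placeholder framing bytes for CUD and END (the real values live elsewhere in the source):

CUD, END = b'\x02', b'\x03'  # placeholder framing bytes
door_id = str(42).encode('utf8')
msg = CUD + b'P' + b'D' + b'{"id": ' + door_id + b'}' + END
print(msg)  # b'\x02PD{"id": 42}\x03'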
def url__cinema_regulate__open_ticket(self, apply_data: dict, remark_info: dict = None) -> bool: <NEW_LINE> <INDENT> uid = self.session.uid <NEW_LINE> flow_id = chv.OPEN_FLOW <NEW_LINE> apply_name = "开业申请" <NEW_LINE> remark_info = self.ckt.deal_remark_info(remark_info) <NEW_LINE> user, cinema_code = self.ckt.get_cinema_account(uid) <NEW_LINE> cinema_info, ticket_data = self.ckt.ticket_apply_verify(chv.CINEMA_PLATFORM, flow_id, apply_name, cinema_code, user) <NEW_LINE> open_info = self.ckt.cinema_open_info_verify(apply_data) <NEW_LINE> open_info.update({ "cinema_code": cinema_code, "cinema_name": cinema_info["name"] }) <NEW_LINE> self.ckt.new_open_ticket(flow_id, user, cinema_info, ticket_data, open_info, remark_info) <NEW_LINE> return True
Cinema opening application (apply to open for business)
625941b8a8ecb033257d2f23
def get_estimation_schema(): <NEW_LINE> <INDENT> schema = TASKSCHEMA.clone() <NEW_LINE> schema['lines']['lines'].doctype = "estimation" <NEW_LINE> tmpl = 'autonomie:deform_templates/paymentdetails_item.pt' <NEW_LINE> schema.add_before( 'communication', TaskNotes(title=u"Notes", name="notes"), ) <NEW_LINE> schema.add_before( 'communication', EstimationPayments( title=u'Conditions de paiement', widget=deform.widget.MappingWidget( item_template=tmpl ), name='payments', ) ) <NEW_LINE> return schema
Return the schema for estimation add/edit
625941b8097d151d1a222ca9
def purge_csv(self, name='log.csv'): <NEW_LINE> <INDENT> f = open(name,'w') <NEW_LINE> f.close()
Purge the data from the staging csv file Parameters ---------- name : `string` The name of the csv log file to purge
625941b8b7558d58953c4d69
def __init__(self, hass, device_id, friendly_name, unit_of_measurement, state_template): <NEW_LINE> <INDENT> self.hass = hass <NEW_LINE> self.entity_id = generate_entity_id(ENTITY_ID_FORMAT, device_id, hass=hass) <NEW_LINE> self._name = friendly_name <NEW_LINE> self._unit_of_measurement = unit_of_measurement <NEW_LINE> self._template = state_template <NEW_LINE> self._state = None <NEW_LINE> self.update() <NEW_LINE> def template_sensor_event_listener(event): <NEW_LINE> <INDENT> self.update_ha_state(True) <NEW_LINE> <DEDENT> hass.bus.listen(EVENT_STATE_CHANGED, template_sensor_event_listener)
Initialize the sensor.
625941b8f548e778e58cd3c9
def __utf_to_caps_func(self, line): <NEW_LINE> <INDENT> utf_text = line[17:-1] <NEW_LINE> if self.__caps_list[-1] == 'true' and self.__convert_caps: <NEW_LINE> <INDENT> utf_text = self.__utf_token_to_caps_func(utf_text) <NEW_LINE> <DEDENT> self.__write_obj.write('tx<ut<__________<%s\n' % utf_text)
Required: line -- line to parse. Returns nothing. Logic: get the text and use another method to convert it.
625941b83c8af77a43ae35ec
def test_successful_modify_percent_snapshot_space(self): <NEW_LINE> <INDENT> data = self.mock_args() <NEW_LINE> data['percent_snapshot_space'] = '90' <NEW_LINE> set_module_args(data) <NEW_LINE> with pytest.raises(AnsibleExitJson) as exc: <NEW_LINE> <INDENT> self.get_volume_mock_object('volume').apply() <NEW_LINE> <DEDENT> assert exc.value.args[0]['changed']
Test successful modify percent_snapshot_space
625941b8d6c5a10208143e95
def get_sec_group(name=None, region=None, secgroup_id=None, tenant_id=None): <NEW_LINE> <INDENT> __args__ = dict() <NEW_LINE> __args__['name'] = name <NEW_LINE> __args__['region'] = region <NEW_LINE> __args__['secgroupId'] = secgroup_id <NEW_LINE> __args__['tenantId'] = tenant_id <NEW_LINE> __ret__ = pulumi.runtime.invoke('openstack:networking/getSecGroup:getSecGroup', __args__) <NEW_LINE> return GetSecGroupResult( region=__ret__.get('region'), tenant_id=__ret__.get('tenantId'), id=__ret__.get('id'))
Use this data source to get the ID of an available OpenStack security group.
625941b821bff66bcd6847a3
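A short usage sketch, assuming the pulumi-openstack Python SDK this wrapper ships with (the security-group name is illustrative):

import pulumi
import pulumi_openstack as openstack

# look up an existing security group by name
default_sg = openstack.networking.get_sec_group(name='default')

# export its id so other resources or stacks can reference it
pulumi.export('default_sg_id', default_sg.id)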