Columns: content — string (lengths 22 to 815k) · id — int64 (values 0 to 4.91M)
def extra():
    """Tests faint.extra.

    That is, less central faint code, possibly requiring extensions
    (e.g. tesseract or GraphViz dot).
    """
    return unittest.defaultTestLoader.discover("py_tests/test_extra",
                                                top_level_dir="py_tests/")
4,000
def cmd_te_activate(abs_filename):
    """Bring the window to the foreground (for text editors).

    Finds the Visual Studio instance that contains the given file and
    brings it to the foreground.

    abs_filename - absolute path of the file
    (Ex.) c:/project/my_app/src/main.cpp
    """
    return _te_main2(cmd_activate, abs_filename)
4,001
def search(search_domain, fmt=None):
    """Handle redirect from form submit."""
    domain = tools.parse_post_data(search_domain)
    if domain is None:
        return handle_invalid_domain(search_domain)

    if fmt is None:
        if features.enable_async_search():
            return flask.redirect('/search?ed={}'.format(search_domain))
        else:
            return html_render(domain)
    elif fmt == 'json':
        return json_render(domain)
    elif fmt == 'csv':
        return csv_render(domain)
    else:
        flask.abort(400, 'Unknown export format: {}'.format(fmt))
4,002
def test_book_list_get_correct_auth_empty_for_user_with_no_books(testapp, one_user):
    """Test that GET to book-list route returns empty list for user without books."""
    data = {
        'email': one_user.email,
        'password': 'password',
    }
    res = testapp.get('/books', data)
    assert res.json == []
4,003
def get_graph_from_particle_positions(
    particle_positions, box_lengths, cutoff_distance, store_positions=False
):
    """Returns a networkx graph of connections between neighboring particles

    Args:
        particle_positions (ndarray or dataframe): Shape
            (`n_particles`, `n_dimensions`). Each of the `n_particles` rows
            is a length `n_dimensions` particle position vector. Positions
            must be in range [0, `box_lengths[d]`) for each dimension `d`.
        box_lengths (ndarray): Shape (`n_dimensions`,) array of box lengths
            for each box dimension.
        cutoff_distance (float): Maximum length between particle pairs to
            consider them connected.
        store_positions (bool, optional): If True, store position vector
            data within each node in the graph. Defaults to False.

    Returns:
        networkx Graph: Graph of connections between all particle pairs
        with distance below cutoff_distance
    """
    distances = pairwise_distances(particle_positions, box_lengths)
    graph = get_within_cutoff_graph(distances, cutoff_distance)
    if store_positions is True:
        for particle_id, particle_position in zip(graph.nodes, particle_positions):
            for i, x_i in enumerate(particle_position):
                graph.nodes[particle_id][f"x{i}"] = x_i
    return graph
4,004
def cca(x,y): """ canonical correlation analysis cca wx, wy, r = cca(x,y) returns wx, wy two matrices which columns [:,i] correspond to the canonical weights (normalized eigenvectors) and a vector r containing the canonical correlations, all sorted in decreasing order. cca assumes as input matrices x,y of size l*m (time*nvar), and l*n, that are centered (no mean along 1st axis) within the function. cca returns an error if either x,y are not full rank.""" import numpy as np mx = x.shape[1] my = y.shape[1] l = x.shape[0] #needs to be the same for y if l != y.shape[0]: raise ValueError('Time dimension is not same length for x,y') xrank = np.linalg.matrix_rank(x) yrank = np.linalg.matrix_rank(y) if mx > xrank: raise ValueError('Matrix x is not full rank.') if my > yrank: raise ValueError("Matrix y is not full rank.") #no mean x = x - np.outer(x.mean(axis=0),np.ones(l)).transpose() y = y - np.outer(y.mean(axis=0),np.ones(l)).transpose() #covariance estimators Sxy = np.dot(x.transpose(),y) / l Sxx = np.dot(x.transpose(),x) / l Syy = np.dot(y.transpose(),y) / l B1 = np.dot(np.linalg.inv(Sxx),Sxy) B2 = np.dot(np.linalg.inv(Syy),Sxy.transpose()) evalx, eigvx = np.linalg.eig(np.dot(B1,B2)) evaly, eigvy = np.linalg.eig(np.dot(B2,B1)) #normalize eigenvectors eigvx = eigvx / np.outer(np.ones((mx,1)),np.sqrt((eigvx**2).sum(axis=0))) eigvy = eigvy / np.outer(np.ones((my,1)),np.sqrt((eigvy**2).sum(axis=0))) # eigenvalues should be the same in evalx and evaly rx = np.sqrt(abs(evalx)) #correlation ry = np.sqrt(abs(evaly)) #sort ordargx = np.argsort(rx)[-1:-mx-1:-1] #decreasing order ordargy = np.argsort(ry)[-1:-mx-1:-1] rx = rx[ordargx] ry = ry[ordargy] eigvx = eigvx[:,ordargx] eigvy = eigvy[:,ordargy] if mx >= my: r = rx else: r = ry return eigvx, eigvy, r
4,005
def create_captcha():
    """Create an image CAPTCHA."""
    image = ImageCaptcha(fonts=DEFAULT_FONTS)
    code = gen_verify_code(4)
    stream = image.generate(code)
    # Base64 string format of the image: data:image/png;data,<base64 string>
    print('===', str(base64.b64encode(stream.getvalue()), encoding='utf-8'))
    image.write(code, '{code}.png'.format(code=code))
4,006
def get_assay_table_path(dataset: TemplateDataset, configuration: dict) -> Path: """Retrieve the assay table file name that determined as a valid assay based on configuration. Specifically, defined in subsection 'ISA meta' :param dataset: A dataset object including a metadata component with an attached ISA archive data asset :type dataset: TemplateDataset :param configuration: Standard assay parsed config :type configuration: dict :return: Path to the found assay table :rtype: Path """ # retrieve study assay subtable from I_file df = dataset.metadata.isa_investigation_subtables["STUDY ASSAYS"] # get valid tuples of measurement and technology types from configuration valid_measurements_and_technology_types: list[tuple[str, str]] = [ (entry["measurement"], entry["technology"]) for entry in configuration["Valid Study Assay Technology And Measurement Types"] ] # check for matching rows based on configuration tuple # one and only one row should match # not very efficient, but table should never be too large for this to be of concern matches: list[Path] = list() for valid_combination in valid_measurements_and_technology_types: log.debug(f"Searching subtable for {valid_combination}") match_row = df.loc[ ( df[["Study Assay Measurement Type", "Study Assay Technology Type"]] == valid_combination ).all(axis="columns") ] match_file = [Path(val) for val in match_row["Study Assay File Name"].values] matches.extend(match_file) # guard, one and only one should match assert ( len(matches) == 1 ), f"One and only one should match, instead got these matches: {matches}" # load assay table assay_file_path = matches[0] [assay_path] = [ f for f in dataset.metadata.fetch_isa_files() if f.name == assay_file_path.name ] return assay_path
4,007
def train_models(vae, emulator, em_lr, vae_lr, signal_train, dataset, val_dataset, epochs, vae_lr_factor, em_lr_factor, vae_min_lr, em_min_lr, vae_lr_patience, em_lr_patience, lr_max_factor, es_patience, es_max_factor): """ Function that train the models simultaneously :param vae: Keras model object, the VAE :param emulator: Keras model object, the emulator :param em_lr: float, initial emulator learning rate :param vae_lr: float, initial VAE learning rate :param signal_train: numpy array of training signals :param dataset: batches from training dataset :param val_dataset: batches from validation dataset :param epochs: max number of epochs to train for, early stopping may stop it before :param vae_lr_factor: factor * old LR (learning rate) is the new LR for the VAE :param em_lr_factor: factor * old LR (learning rate) is the new LR for the emulator :param vae_min_lr: minimum allowed LR for VAE :param em_min_lr: minimum allowed LR for emulator :param vae_lr_patience: max number of epochs loss has not decreased for the VAE before reducing LR :param em_lr_patience: max number of epochs loss has not decreased for the emulator before reducing LR :param lr_max_factor: max_factor * current loss is the max acceptable loss, a larger loss means that the counter is added to, when it reaches the 'patience', the LR is reduced :param es_patience: max number of epochs loss has not decreased before early stopping :param es_max_factor: max_factor * current loss is the max acceptable loss, a larger loss for either the VAE or the emulator means that the counter is added to, when it reaches the 'patience', early stopping is applied :return tuple, four lists of losses as they change with epoch for the VAE (training loss and validation loss) and emulator (training and validation) in that order """ # initialize lists of training losses and validation losses vae_loss = [] vae_loss_val = [] em_loss = [] em_loss_val = [] # Did the model loss plateau? plateau_vae = False plateau_em = False vae_reduced_lr = 0 # epochs since last time lr was reduced em_reduced_lr = 0 # epochs since last time lr was reduced # compile the models compile_VAE(vae, vae_lr) compile_emulator(emulator, em_lr, signal_train) @tf.function def run_train_step(batch): """ Function that trains the VAE and emulator for one batch. Returns the losses for that specific batch. 
""" params = batch[0] signal = batch[1] amp_raw = batch[2] # amplitudes, raw because we need to reshape amplitudes = tf.expand_dims(amp_raw, axis=1) # reshape amplitudes signal_amplitudes = tf.concat((signal, amplitudes), axis=1) # both signal and amplitude with tf.GradientTape() as tape: vae_pred = vae(signal) # apply VAE to signal vae_batch_loss = vae.losses # get the loss # back-propagate losses for the VAE vae_gradients = tape.gradient(vae_batch_loss, vae.trainable_weights) vae.optimizer.apply_gradients(zip(vae_gradients, vae.trainable_weights)) # same procedure for emulator with tf.GradientTape() as tape: em_pred = emulator(params) loss_function = em_loss_fcn(signal_train) em_batch_loss = loss_function(signal_amplitudes, em_pred) em_gradients = tape.gradient(em_batch_loss, emulator.trainable_weights) emulator.optimizer.apply_gradients(zip(em_gradients, emulator.trainable_weights)) return vae_batch_loss, em_batch_loss # the training loop for i in range(epochs): epoch = int(i + 1) print("\nEpoch {}/{}".format(epoch, epochs)) # reduce lr if necessary if plateau_vae and vae_reduced_lr >= 5: reduce_lr(vae, vae_lr_factor, vae_min_lr) vae_reduced_lr = 0 if plateau_em and em_reduced_lr >= 5: reduce_lr(emulator, em_lr_factor, em_min_lr) em_reduced_lr = 0 vae_batch_losses = [] val_vae_batch_losses = [] em_batch_losses = [] val_em_batch_losses = [] # loop through the batches and train the models on each batch for batch in dataset: vae_batch_loss, em_batch_loss = run_train_step(batch) vae_batch_losses.append(vae_batch_loss) # append VAE train loss for this batch em_batch_losses.append(em_batch_loss) # append emulator train loss for this batch # loop through the validation batches, we are not training on them but # just evaluating and tracking the performance for batch in val_dataset: param_val = batch[0] signal_val = batch[1] amp_val = tf.expand_dims(batch[2], axis=1) val_signal_amplitudes = tf.concat((signal_val, amp_val), axis=1) val_em_batch_loss = emulator.test_on_batch(param_val, val_signal_amplitudes) val_vae_batch_loss = vae.test_on_batch(signal_val, signal_val) val_vae_batch_losses.append(val_vae_batch_loss) val_em_batch_losses.append(val_em_batch_loss) vae_loss_epoch = K.mean(tf.convert_to_tensor(vae_batch_losses)) # average VAE train loss over this epoch em_loss_epoch = K.mean(tf.convert_to_tensor(em_batch_losses)) # average emulator train loss print('VAE train loss: {:.4f}'.format(vae_loss_epoch)) print('Emulator train loss: {:.4f}'.format(em_loss_epoch)) # in case a loss is NaN # this is unusal, but not a big deal, just restart the training # (otherwise the loss just stays NaN) if np.isnan(vae_loss_epoch) or np.isnan(em_loss_epoch): print("Loss is NaN, restart training") break # save each epoch loss to a list with all epochs vae_loss.append(vae_loss_epoch) em_loss.append(em_loss_epoch) vae_loss_epoch_val = np.mean(val_vae_batch_losses) # average VAE train loss over this epoch em_loss_epoch_val = np.mean(val_em_batch_losses) # average emulator train loss vae_loss_val.append(vae_loss_epoch_val) em_loss_val.append(em_loss_epoch_val) print('VAE val loss: {:.4f}'.format(vae_loss_epoch_val)) print('Emulator val loss: {:.4f}'.format(em_loss_epoch_val)) # save weights if epoch == 1: # save first epoch vae.save('checkpoints/best_vae') emulator.save('checkpoints/best_em') elif em_loss_val[-1] < np.min(em_loss_val[:-1]): # performance is better than prev epoch vae.save('checkpoints/best_vae') emulator.save('checkpoints/best_em') # early stopping? 
keep_going = early_stop(es_patience, es_max_factor, vae_loss_val, em_loss_val) if not keep_going: break # check if loss stopped decreasing plateau_vae = plateau_check("vae", vae_lr_patience, lr_max_factor, vae_loss_val, em_loss_val) plateau_em = plateau_check("emulator", em_lr_patience, lr_max_factor, vae_loss_val, em_loss_val) vae_reduced_lr += 1 em_reduced_lr += 1 return vae_loss, vae_loss_val, em_loss, em_loss_val
4,008
def get_next_term(cfg): """ Gets the next term to be added. Args: cfg: Expression config """ term = {} if np.random.choice(['quantity', 'number'], p=[cfg.ratio, 1 - cfg.ratio]) == 'quantity': idx = np.random.choice(range(len(cfg.quants))) if cfg.reuse: term['expression'] = cfg.quants[idx] term['numerical'] = cfg.vals[idx] term['estimation_difficulty'] = cfg.diffs[idx] term['quantity_ids'] = [cfg.quantity_ids[idx]] term['categories'] = [cfg.categories[idx]] else: term['expression'] = cfg.quants.pop(idx) term['numerical'] = cfg.vals.pop(idx) term['estimation_difficulty'] = cfg.diffs.pop(idx) term['quantity_ids'] = [cfg.quantity_ids.pop(idx)] term['categories'] = [cfg.categories.pop(idx)] else: if len(cfg.numbers) != 200: # Where we're not using the default uniform sampling over numbers idx = int(np.random.lognormal(3, 8) + abs(np.random.normal(0, 50))) + 1 term['expression'] = str(idx) term['numerical'] = str(idx) term['estimation_difficulty'] = 0 term['quantity_ids'] = [] term['categories'] = [] else: idx = np.random.choice(range(len(cfg.numbers))) term['expression'] = str(idx) term['numerical'] = str(idx) term['estimation_difficulty'] = 0 term['quantity_ids'] = [] term['categories'] = [] return term
4,009
def test_grid_jan_2022(): """A specific unit test for January 31st, 2022 not showing up""" days = { date(2022, 1, 5): "E", date(2022, 1, 6): "C", date(2022, 1, 7): "A", date(2022, 1, 10): "F", date(2022, 1, 11): "D", date(2022, 1, 12): "B", date(2022, 1, 13): "G", date(2022, 1, 14): "E", date(2022, 1, 18): "C", date(2022, 1, 19): "A", date(2022, 1, 20): "F", date(2022, 1, 21): "D", date(2022, 1, 24): "B", date(2022, 1, 25): "G", date(2022, 1, 26): "E", date(2022, 1, 27): "C", date(2022, 1, 28): "A", date(2022, 1, 31): "F", } expected_headers = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday"] expected_grid = [ [ grids.GridItem(date(2022, 1, 3), None), grids.GridItem(date(2022, 1, 4), None), grids.GridItem(date(2022, 1, 5), "E"), grids.GridItem(date(2022, 1, 6), "C"), grids.GridItem(date(2022, 1, 7), "A"), ], [ grids.GridItem(date(2022, 1, 10), "F"), grids.GridItem(date(2022, 1, 11), "D"), grids.GridItem(date(2022, 1, 12), "B"), grids.GridItem(date(2022, 1, 13), "G"), grids.GridItem(date(2022, 1, 14), "E"), ], [ grids.GridItem(date(2022, 1, 17), None), grids.GridItem(date(2022, 1, 18), "C"), grids.GridItem(date(2022, 1, 19), "A"), grids.GridItem(date(2022, 1, 20), "F"), grids.GridItem(date(2022, 1, 21), "D"), ], [ grids.GridItem(date(2022, 1, 24), "B"), grids.GridItem(date(2022, 1, 25), "G"), grids.GridItem(date(2022, 1, 26), "E"), grids.GridItem(date(2022, 1, 27), "C"), grids.GridItem(date(2022, 1, 28), "A"), ], [ grids.GridItem(date(2022, 1, 31), "F"), grids.GridItem(date=None, letter=None, label=None), grids.GridItem(date=None, letter=None, label=None), grids.GridItem(date=None, letter=None, label=None), grids.GridItem(date=None, letter=None, label=None), ], ] expected = grids.CalendarGrid(title="January 2022", headers=expected_headers, grid=expected_grid) generator = grids.CalendarGridGenerator( date_letter_map=days, label_map={}, start_date=date(2022, 1, 1), end_date=date(2022, 1, 31)) actual = generator.get_grid() assert actual == expected
4,010
def test_get_evaldiff():  # ***Incomplete test
    """Test the get_evaldiff function in the search.py file."""
    ##########################
    # Arrange.
    evalue1 = "evalue1"
    evalue2 = "evalue2"

    ##########################
    # Act.
    #x = get_evaldiff(evalue1,
    #                 evalue2)

    ##########################
    # Assert.
    assert True == True
4,011
def demand_share_per_timestep_constraint_rule(backend_model, group_name, carrier, timestep, what): """ Enforces shares of demand of a carrier to be met by the given groups of technologies at the given locations, in each timestep. The share is relative to ``demand`` technologies only. .. container:: scrolling-wrapper .. math:: \\sum_{loc::tech::carrier \\in given\\_group} carrier_{prod}(loc::tech::carrier, timestep) \\leq share \\times \\sum_{loc::tech:carrier \\in loc\\_techs\\_demand \\in given\\_locations} carrier_{con}(loc::tech::carrier, timestep) for timestep \\in timesteps """ share = get_param(backend_model, 'group_demand_share_per_timestep_{}'.format(what), (carrier, group_name)) if share is None: return return_noconstraint('demand_share_per_timestep', group_name) else: lhs_loc_tech_carriers, rhs_loc_tech_carriers = get_demand_share_lhs_and_rhs_loc_tech_carriers( backend_model, group_name, carrier ) lhs = sum( backend_model.carrier_prod[loc_tech_carrier, timestep] for loc_tech_carrier in lhs_loc_tech_carriers ) rhs = share * -1 * sum( backend_model.carrier_con[loc_tech_carrier, timestep] for loc_tech_carrier in rhs_loc_tech_carriers ) return equalizer(lhs, rhs, what)
4,012
def create_tables(cur, conn):
    """Loops through all queries and creates tables in redshift cluster"""
    for query in create_table_queries:
        cur.execute(query)
        conn.commit()
4,013
def b2str(data):
    """Convert bytes into string type."""
    try:
        return data.decode("utf-8")
    except UnicodeDecodeError:
        pass
    try:
        return data.decode("utf-8-sig")
    except UnicodeDecodeError:
        pass
    try:
        return data.decode("ascii")
    except UnicodeDecodeError:
        return data.decode("latin-1")
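A minimal usage sketch for b2str; the byte strings below are hypothetical inputs chosen only to exercise the fallback chain.

print(b2str(b"hello"))    # decodes on the first try via utf-8 -> 'hello'
print(b2str(b"caf\xe9"))  # not valid utf-8/ascii, falls through to latin-1 -> 'café'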
4,014
def plotalphaerror(alphaarr,errorarr,errorlagarr): """ This will plot the error with respect then alpha parameter for the constraint. """ sns.set_style('whitegrid') sns.set_context('notebook') Nlag=errorlagarr.shape[-1] nlagplot=4. nrows=1+int(sp.ceil(float(Nlag)/(2*nlagplot))) fig=plt.figure(figsize=(8,4*nrows),facecolor='w') gs=gridspec.GridSpec(nrows,2) axmain=plt.subplot(gs[0,:]) axlist=[plt.subplot(gs[int(sp.floor(float(i)/2.)+1),int(sp.mod(i,2))]) for i in range(2*(nrows-1))] axmain.plot(alphaarr,errorarr) axmain.set_xscale('log') axmain.set_yscale('log') axmain.set_title('Error From All Lags Added',fontsize=fs) axmain.set_ylabel('Error',fontsize=fs) axmain.set_xlabel(r'$\gamma$',fontsize=fs) for iaxn,iax in enumerate(axlist): strlist=[] handlist=[] for ilag in range(int(nlagplot)): curlag=int(iaxn*nlagplot+ilag) if curlag>=Nlag: break handlist.append(iax.plot(alphaarr,errorlagarr[:,curlag])[0]) strlist.append('Lag {0}'.format(curlag)) iax.set_xscale('log') iax.set_yscale('log') iax.set_title('Error From Lags',fontsize=fs) iax.set_ylabel('Error',fontsize=fs) iax.set_xlabel(r'$\gamma$',fontsize=fs) iax.legend(handlist,strlist,loc='upper right',fontsize='large') plt.tight_layout() return(fig,axlist,axmain)
4,015
def fill_space(space, dim, size, minval, maxval, factor):
    """Fill a dim-dimensional discrete space of ℕ^{size} with some random
    hyperplane with values ranging from minval to maxval.

    Returns a ℕ^{size} array. Changes space in-place."""
    offsets = [np.array([0] * dim)]
    return ndim_diamond_square_rec(space, dim, size, offsets, minval, maxval, factor)
4,016
def sum_to(n):
    """Return the sum of all integer numbers up to and including n."""
    S = 0
    for i in range(n + 1):
        S += i
    return S
4,017
def trigger_update_xblocks_cache_task(sender, course_key, **kwargs):  # pylint: disable=unused-argument
    """
    Trigger update_xblocks_cache() when course_published signal is fired.
    """
    tasks = import_module('openedx.core.djangoapps.bookmarks.tasks')  # Importing tasks early causes issues in tests.
    # Note: The countdown=0 kwarg is set to ensure the method below does not attempt to access the course
    # before the signal emitter has finished all operations. This is also necessary to ensure all tests pass.
    tasks.update_xblocks_cache.apply_async([str(course_key)], countdown=0)
4,018
def fracorder_lowshelving_eastty(w1, w2, G1, G2, rB=None): """ Parameters ---------- w1: float Lower corner frequency. w2: float Upper corner frequency. G1: float Target level at lower corner frequency in dB. G2: float Target level at upper corner frequency in dB. rB: float Gain per octave. Returns ------- z: array_like Complex zeros in the Laplace domain. p: array_like Complex poles in the Laplace domain. k: float Gain. """ Gd = G1 - G2 n_eff = effective_order(w1, w2, Gd, rB) n_int, n_frac = np.divmod(n_eff, 1) n_int = int(n_int) z = np.array([]) p = np.array([]) # Second-order sections (complex conjugate pole/zero pairs) if n_int > 0: alpha = complex_zp_angles(n_int, n_frac) alpha = np.concatenate((alpha, -alpha)) z = w1 * np.exp(1j * alpha) p = w2 * np.exp(1j * alpha) # First-order section (real pole/zero) if n_eff % 2 != 0: s_lower, s_upper = real_zp(n_int, n_frac, w1, w2) if n_int % 2 == 0: z_real = s_lower p_real = s_upper elif n_int % 2 == 1: z_real = s_upper p_real = s_lower z = np.append(z, z_real) p = np.append(p, p_real) return z, p, 1
4,019
def get_cookie_date(date):
    """
    Return a date string in a format suitable for cookies
    (https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Date)

    :param date: datetime object
    :return: date string in cookie format
    """
    return date.strftime("%a, %d %b %Y %H:%M:%S GMT")
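A short usage sketch. Note the function only formats the datetime it is given; it does no timezone conversion, so the caller is assumed to pass a UTC datetime.

from datetime import datetime

print(get_cookie_date(datetime(2024, 1, 31, 12, 0, 0)))
# -> 'Wed, 31 Jan 2024 12:00:00 GMT'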
4,020
def fill_block_with_call(newblock, callee, label_next, inputs, outputs): """Fill *newblock* to call *callee* with arguments listed in *inputs*. The returned values are unwraped into variables in *outputs*. The block would then jump to *label_next*. """ scope = newblock.scope loc = newblock.loc fn = ir.Const(value=callee, loc=loc) fnvar = scope.make_temp(loc=loc) newblock.append(ir.Assign(target=fnvar, value=fn, loc=loc)) # call args = [scope.get_exact(name) for name in inputs] callexpr = ir.Expr.call(func=fnvar, args=args, kws=(), loc=loc) callres = scope.make_temp(loc=loc) newblock.append(ir.Assign(target=callres, value=callexpr, loc=loc)) # unpack return value for i, out in enumerate(outputs): target = scope.get_exact(out) getitem = ir.Expr.static_getitem(value=callres, index=i, index_var=None, loc=loc) newblock.append(ir.Assign(target=target, value=getitem, loc=loc)) # jump to next block newblock.append(ir.Jump(target=label_next, loc=loc)) return newblock
4,021
def pairplot_correlations(data, sample=30000):
    """
    :param data: a DataFrame file.
    :param sample: the amount of data points to sample from (default = 30000).
    :return: generates a pairwise plot relationship of the data set.
    """
    # pairwise relationship of the features distributions in the dataset.
    sn.pairplot(data.sample(sample), hue="label")
    plt.savefig(args.output_file + "/feature_selection_by_correlation_pairplot.png")
4,022
def terraform_write_variables(configs: Dict, variables_to_exclude: List) -> str:
    """Write out given config object as a Terraform variables JSON file.

    Persist variables to Terraform state directory. These variables are used
    on apply / plan, and are required for deprovisioning.
    """
    det_version = configs.get("det_version")
    if not det_version or not isinstance(det_version, str):
        print("ERROR: Determined version missing or invalid")
        sys.exit(1)

    # Add GCP-friendly version key to configs. We persist this since it's used
    # across the cluster lifecycle: to name resources on provisioning, and to
    # filter for the master and dynamic agents on deprovisioning.
    configs["det_version_key"] = det_version.replace(".", "-")[0:8]

    # Track the default zone in configuration variables. This is needed
    # during deprovisioning.
    if "zone" not in configs:
        configs["zone"] = f"{configs['region']}-b"

    vars_file_path = get_terraform_vars_file_path(configs)

    tf_vars = {k: configs[k] for k in configs if k not in variables_to_exclude}
    with open(vars_file_path, "w") as f:
        json.dump(tf_vars, f)

    return vars_file_path
4,023
def data_context_service_interface_pointuuid_media_channel_service_interface_point_spec_mc_pool_available_spectrumupper_frequencylower_frequency_frequency_constraint_get(uuid, upper_frequency, lower_frequency):  # noqa: E501
    """data_context_service_interface_pointuuid_media_channel_service_interface_point_spec_mc_pool_available_spectrumupper_frequencylower_frequency_frequency_constraint_get

    returns tapi.photonic.media.FrequencyConstraint # noqa: E501

    :param uuid: Id of service-interface-point
    :type uuid: str
    :param upper_frequency: Id of available-spectrum
    :type upper_frequency: int
    :param lower_frequency: Id of available-spectrum
    :type lower_frequency: int

    :rtype: TapiPhotonicMediaFrequencyConstraint
    """
    return 'do some magic!'
4,024
def get_ems_config(cluster: str, headers_inc: str):
    """Fetches the EMS configuration"""
    url = "https://{}/api/support/ems/".format(cluster)
    try:
        response = requests.get(url, headers=headers_inc, verify=False)
    except requests.exceptions.HTTPError as err:
        print(err)
        sys.exit(1)
    except requests.exceptions.RequestException as err:
        print(err)
        sys.exit(1)
    tmp = dict(response.json())
    print("\nEMS Configuration:- ")
    print("=====================")
    print("Mail_from = %s " % tmp['mail_from'])
    print("Mail_server = %s " % tmp['mail_server'])
    print("=====================")
4,025
def image_to_string(filename):
    """Generate a string representation of the image at the given path,
    for embedding in code."""
    image = pyglet.image.load(filename)
    data = image.get_data('LA', 16)
    s = ''
    for x in data:
        s += "\\x%02x" % (ord(x))
    return s
4,026
def init_build(build_dir, started=True, finished=True):
    """Create faked files for a build."""
    if started:
        write(build_dir + 'started.json',
              {'version': 'v1+56', 'timestamp': 1406535800})
    if finished:
        write(build_dir + 'finished.json',
              {'result': 'SUCCESS', 'timestamp': 1406536800})
    write(build_dir + 'artifacts/junit_01.xml', JUNIT_SUITE)
4,027
def test_client_can_be_created(unauth_client):
    """Test that the client object can be created."""
    assert isinstance(unauth_client, AvataxClient)
4,028
def mpf_connectome( mc, num_sampled, max_depth, args_dict, clt_start=10, sr=0.01, mean_estimate=False ): """Perform mpf statistical calculations on the mouse connectome.""" args_dict["max_depth"] = max_depth args_dict["total_samples"] = num_sampled[0] args_dict["static_verbose"] = False args_dict["clt_start"] = clt_start args_dict["mean_estimate"] = mean_estimate if max_depth > 1: sr = None if mean_estimate is True: sr = None cp = CombProb( mc.num_a, num_sampled[0], mc.num_senders, mc.num_b, num_sampled[1], MatrixConnectivity.static_expected_connections, verbose=True, subsample_rate=sr, **args_dict ) result = { "expected": cp.expected_connections(), "total": cp.get_all_prob(), "each_expected": {k: cp.expected_total(k) for k in range(num_sampled[0] + 1)}, } return result
4,029
def valuedict(keys, value, default):
    """
    Build value dictionary from a list of keys and a value.

    Parameters
    ----------
    keys: list
        The list of keys
    value: {dict, int, float, str, None}
        A value or the already formed dictionary
    default: {int, float, str}
        A default value to set if no value

    Returns
    -------
    dict
        A dictionary

    Notes
    -----
    This standalone and generic function is only required by plotters.
    """
    if isinstance(value, dict):
        return {key: value.get(key, default) for key in keys}
    else:
        return dict.fromkeys(keys, value or default)
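A quick illustration of the two branches (scalar value vs. already-formed dict), using made-up keys and values:

keys = ["a", "b", "c"]
print(valuedict(keys, 2.0, 1.0))         # {'a': 2.0, 'b': 2.0, 'c': 2.0}
print(valuedict(keys, {"a": 5.0}, 1.0))  # {'a': 5.0, 'b': 1.0, 'c': 1.0}
print(valuedict(keys, None, 1.0))        # falls back to the default: all 1.0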
4,030
async def chunks(request):
    """A handler that sends chunks at a slow pace.

    The browser will download the page over the range of 2 seconds,
    but only displays it when done. This e.g. allows streaming large
    files without using large amounts of memory.
    """
    async def iter():
        yield "<html><head></head><body>"
        yield "Here are some chunks dripping in:<br>"
        for i in range(20):
            await asgineer.sleep(0.1)
            yield "CHUNK <br>"
        yield "</body></html>"

    return 200, {"content-type": "text/html"}, iter()
4,031
def start_vitess():
    """This is the main start function."""

    topology = vttest_pb2.VTTestTopology()
    keyspace = topology.keyspaces.add(name='user')
    keyspace.shards.add(name='-80')
    keyspace.shards.add(name='80-')
    keyspace = topology.keyspaces.add(name='lookup')
    keyspace.shards.add(name='0')

    vttop = os.environ['VTTOP']
    args = [os.path.join(vttop, 'py/vttest/run_local_database.py'),
            '--port', '12345',
            '--proto_topo', text_format.MessageToString(topology, as_one_line=True),
            '--web_dir', os.path.join(vttop, 'web/vtctld'),
            '--schema_dir', os.path.join(vttop, 'examples/demo/schema')]
    sp = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)

    # This load will make us wait for vitess to come up.
    json.loads(sp.stdout.readline())
    return sp
4,032
def get_zero_ranges(*args):
    """
    get_zero_ranges(zranges, range) -> bool

    Return set of ranges with zero initialized bytes. The returned set
    includes only big zero initialized ranges (at least >1KB). Some zero
    initialized byte ranges may be not included. Only zero bytes that use
    the sparse storage method (STT_MM) are reported.

    @param zranges: pointer to the return value. cannot be NULL (C++: rangeset_t *)
    @param range: the range of addresses to verify. can be NULL - means all
                  ranges (C++: const range_t *)
    @return: true if the result is a non-empty set
    """
    return _ida_bytes.get_zero_ranges(*args)
4,033
def fista_step(L, Wd, X, alpha, last_Z):
    """
    Calculates the next sparse code for the FISTA algorithm

    Dimension notation:
    B - Number of samples. Usually number of patches in image times batch size
    K - Number of atoms in dictionary
    d - Dimensionality of atoms in dictionary

    :param X: Input - Signal to find sparse coding against. Dimensions: d X B
    :param Wd: Dictionary - Tensor of atoms we want to get a sparse linear
        combination of. Dimensions: d X K
    :param alpha: Float. Sparsity weight
    :param L: Float. Largest eigenvalue in Wd
    :param last_Z: Sparse code from previous step. Dimensions: K x B
    :return: Z: linear coefficients for Sparse Code solution. Dimensions: K x B
    """
    quantization_distance = Wd.mm(last_Z) - X.to(Wd.device)
    normalized_dictionary = Wd.t() / L
    normalized_quantization_projection = normalized_dictionary.mm(quantization_distance)
    cur_Z = last_Z - normalized_quantization_projection
    cur_Z = shrink_function(cur_Z, alpha / L)
    return cur_Z
4,034
def get_and_validate_certs_for_replacement(
        default_cert_location,
        default_key_location,
        default_ca_location,
        new_cert_location,
        new_key_location,
        new_ca_location):
    """Validates the new certificates for replacement.

    This function validates the new specified certificates for replacement,
    based on the new certificates specified and the current ones. E.g. if
    only a new certificate and key were specified, then it will validate
    them with the current CA.
    """
    cert_filename, key_filename = get_cert_and_key_filenames(
        new_cert_location, new_key_location,
        default_cert_location, default_key_location)

    ca_filename = get_ca_filename(new_ca_location, default_ca_location)

    validate_certificates(cert_filename, key_filename, ca_filename)
    return cert_filename, key_filename, ca_filename
4,035
async def end_session(ctx, tutor, account):
    """remove the tutor object from tutor accounts.

    :param Context ctx: the current Context.
    :param 'Worker' tutor: the object that represents a tutor.
    :param {} account: the dictionary that stores the tutor objects.
    """
    try:
        # remove tutor object from accounts.
        account.pop(tutor.ctx.discord_id())
        # display confirmation.
        await send_embed(ctx, text=f'{tutor.ctx.mention()} thank you!')
    except AttributeError:
        await send_embed(ctx, text='*tutoring session not found.*')

    # send the tutor the sign-in sheet.
    await generate_sing_in_sheet(ctx, tutor)
4,036
def fail_json(*args, **kwargs):  # pylint: disable=unused-argument
    """function to patch over fail_json; package return data into an exception"""
    kwargs['failed'] = True
    raise AnsibleFailJson(kwargs)
4,037
def entity_sentiment_text(text): """Detects entity sentiment in the provided text.""" language_client = language_service_client.LanguageServiceClient() document = language_service_pb2.Document() if isinstance(text, six.binary_type): text = text.decode('utf-8') document.content = text.encode('utf-8') document.type = enums.Document.Type.PLAIN_TEXT encoding = enums.EncodingType.UTF32 if sys.maxunicode == 65535: encoding = enums.EncodingType.UTF16 result = language_client.analyze_entity_sentiment( document, encoding) for entity in result.entities: print('Mentions: ') print(u'Name: "{}"'.format(entity.name)) for mention in entity.mentions: print(u' Begin Offset : {}'.format(mention.text.begin_offset)) print(u' Content : {}'.format(mention.text.content)) print(u' Magnitude : {}'.format(mention.sentiment.magnitude)) print(u' Sentiment : {}'.format(mention.sentiment.score)) print(u' Type : {}'.format(mention.type)) print(u'Salience: {}'.format(entity.salience)) print(u'Sentiment: {}\n'.format(entity.sentiment))
4,038
def build_model(cfg):
    """
    Build the whole model, defined by `cfg.model.name`.
    """
    name = cfg.model.name
    return META_ARCH_REGISTRY.get(name)(cfg)
4,039
def download_n_parse_3k(url):
    """
    Gets the article's metadata

    Args:
        url: The article's URL
    """
    article3k = Article(url)
    try:
        article3k.download()
        article3k.parse()
    except Exception:
        print(f"Download or Parse:\t{url}")
        return
    return article3k.text
4,040
def build_model(images, datasets, epochs=None, log=False, use_model=None,
                save_model='model', pretrained=True):
    """Run the training regime on the model and save its best effort"""
    num_epochs = epochs
    if not num_epochs:
        num_epochs = EPOCHS

    model_ft = initialise_model(images, use_model=use_model, pretrained=pretrained)
    criterion = nn.CrossEntropyLoss()
    optimizer_ft = optim.SGD(model_ft.parameters(), lr=LEARN_RATE, momentum=MOMENTUM)

    # Decay LR by a factor of 0.1 every 7 epochs
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)

    model_ft = train_model(model_ft, images, datasets, criterion, optimizer_ft,
                           exp_lr_scheduler, num_epochs=num_epochs, log=log)
    torch.save(model_ft, os.path.join(os.getcwd(), save_model))
4,041
def crop_to_square(img, target_size=None):
    """
    Takes numpy array img and converts it to a square by trimming

    :param img: np.array representing image
    :param target_size: optionally specify target size. If None, will return
        min(l, w) x min(l, w)
    :return: np.array
    """
    l, w = img.shape
    img_copy = img.copy()

    if l > w:
        delta = l - w
        cropped_img = img_copy[delta // 2: -delta + delta // 2, :]
    elif l < w:
        delta = w - l
        cropped_img = img_copy[:, delta // 2: -delta + delta // 2]
    else:
        cropped_img = img_copy

    if target_size:
        current_size = cropped_img.shape[0]  # should be a square
        center = max(target_size, current_size) // 2
        offset_min = center - min(target_size, current_size) // 2
        offset_max = offset_min + min(target_size, current_size)
        if target_size > current_size:
            new_image = np.zeros((target_size, target_size))
            new_image[offset_min:offset_max, offset_min:offset_max] = cropped_img
            cropped_img = new_image.copy()
        else:
            cropped_img = cropped_img[offset_min:offset_max, offset_min:offset_max]

    return np.asarray(cropped_img, dtype=np.float32)
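A minimal sketch of how this might be called, using a small synthetic array (numpy assumed imported as np, as in the function body):

import numpy as np

img = np.arange(12, dtype=np.float32).reshape(3, 4)  # 3 x 4, wider than tall
square = crop_to_square(img)                         # trimmed to 3 x 3
padded = crop_to_square(img, target_size=5)          # zero-padded up to 5 x 5
print(square.shape, padded.shape)                    # (3, 3) (5, 5)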
4,042
def reset():
    """ Resets the built-in Layer dictionary (controls the coloring in
    quickplot() ), and sets the Device universal ID (uid) to zero. """
    Layer.layer_dict = {}
    Device._next_uid = 0
4,043
def reboot(name, path=None):
    """
    Reboot a container.

    path
        path to the container parent
        default: /var/lib/lxc (system default)

        .. versionadded:: 2015.8.0

    CLI Examples:

    .. code-block:: bash

        salt 'minion' lxc.reboot myvm
    """
    ret = {"result": True, "changes": {}, "comment": "{0} rebooted".format(name)}
    does_exist = exists(name, path=path)
    if does_exist and (state(name, path=path) == "running"):
        try:
            stop(name, path=path)
        except (SaltInvocationError, CommandExecutionError) as exc:
            ret["comment"] = "Unable to stop container: {0}".format(exc)
            ret["result"] = False
            return ret
    if does_exist and (state(name, path=path) != "running"):
        try:
            start(name, path=path)
        except (SaltInvocationError, CommandExecutionError) as exc:
            ret["comment"] = "Unable to start container: {0}".format(exc)
            ret["result"] = False
            return ret
    ret["changes"][name] = "rebooted"
    return ret
4,044
def find(domain):
    """ Finds connected domains within a domain.

    A domain is defined to be a connected region of lattice points,
    subject to periodic boundary conditions.

    Parameters
    ----------
    domain : :py:class:`~fieldkit.mesh.Domain`
        The set of nodes to seek connected domains in.

    Returns
    -------
    tuple
        A tuple of all :py:class:`~fieldkit.mesh.Domain` objects identified
        within the `domain`. At most, there is only one domain returned, but
        many can be identified if the points in the `domain` are highly
        disconnected.

    Notes
    -----
    The connected domains are determined using a graph-based approach,
    which requires the `networkx` package. Performance is generally good,
    but the algorithm may struggle for large numbers of nodes or domains.

    """
    comps = networkx.connected_components(domain.graph)
    return tuple([Domain(domain.mesh, list(c)) for c in comps])
4,045
def url_to_license(url):
    """Given a URL, return the license as a license/version tuple"""
    (scheme, netloc, path, *remainder) = urlparse(url)
    path_parts = path.split('/')
    if len(path_parts) < 4:
        raise LicenseException("Did not get 4 path segments, probably not a CC license URL")
    license = path_parts[2].upper()  # First is '', because it starts with a leading /
    version = path_parts[3]
    # Handle the PD licenses as special-cases
    if license == 'ZERO':
        license = 'CC0'
        version = '1.0'
    if license == 'MARK':
        license = 'PDM'
        version = '1.0'
    if license not in LICENSE_LIST:
        raise LicenseException("License fragment %s was not a valid license", license)
    return (license, version)
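Illustrative calls, assuming LICENSE_LIST contains the usual Creative Commons license codes (e.g. 'BY-SA', 'CC0'):

print(url_to_license("https://creativecommons.org/licenses/by-sa/4.0/"))
# -> ('BY-SA', '4.0')
print(url_to_license("https://creativecommons.org/publicdomain/zero/1.0/"))
# -> ('CC0', '1.0'), via the 'ZERO' special case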
4,046
def dsystem_dt(request):
    """Test systems for test_discrete"""
    # SISO state space systems with either fixed or unspecified sampling times
    sys = rss(3, 1, 1)

    # MIMO state space systems with either fixed or unspecified sampling times
    A = [[-3., 4., 2.], [-1., -3., 0.], [2., 5., 3.]]
    B = [[1., 4.], [-3., -3.], [-2., 1.]]
    C = [[4., 2., -3.], [1., 4., 3.]]
    D = [[-2., 4.], [0., 1.]]
    dt = request.param
    systems = {'sssiso': StateSpace(sys.A, sys.B, sys.C, sys.D, dt),
               'ssmimo': StateSpace(A, B, C, D, dt),
               'tf': TransferFunction([2, 1], [2, 1, 1], dt)}
    return systems
4,047
def update_attributes(dsFolder: types.GirderModel, data: dict):
    """Upsert or delete attributes"""
    crud.verify_dataset(dsFolder)
    validated: AttributeUpdateArgs = crud.get_validated_model(AttributeUpdateArgs, **data)
    attributes_dict = fromMeta(dsFolder, 'attributes', {})

    for attribute_id in validated.delete:
        attributes_dict.pop(str(attribute_id), None)
    for attribute in validated.upsert:
        attributes_dict[str(attribute.key)] = attribute.dict(exclude_none=True)

    upserted_len = len(validated.upsert)
    deleted_len = len(validated.delete)

    if upserted_len or deleted_len:
        update_metadata(dsFolder, {'attributes': attributes_dict})

    return {
        "updated": upserted_len,
        "deleted": deleted_len,
    }
4,048
def machado_et_al_2009_matrix_protanomaly(severity): """Retrieve a matrix for simulating anomalous color vision. :param cvd_type: One of "protanomaly", "deuteranomaly", or "tritanomaly". :param severity: A value between 0 and 100. :returns: A 3x3 CVD simulation matrix as computed by Machado et al (2009). These matrices were downloaded from: http://www.inf.ufrgs.br/~oliveira/pubs_files/CVD_Simulation/CVD_Simulation.html which is supplementary data from :cite:`Machado-CVD`. If severity is a multiple of 10, then simply returns the matrix from that webpage. For other severities, performs linear interpolation. """ MACHADO_ET_AL_MATRIX_protanomaly = np.array( ( ( [1.000000, 0.000000, -0.000000], [0.000000, 1.000000, 0.000000], [-0.000000, -0.000000, 1.000000], ), ( [0.856167, 0.182038, -0.038205], [0.029342, 0.955115, 0.015544], [-0.002880, -0.001563, 1.004443], ), ( [0.734766, 0.334872, -0.069637], [0.051840, 0.919198, 0.028963], [-0.004928, -0.004209, 1.009137], ), ( [0.630323, 0.465641, -0.095964], [0.069181, 0.890046, 0.040773], [-0.006308, -0.007724, 1.014032], ), ( [0.539009, 0.579343, -0.118352], [0.082546, 0.866121, 0.051332], [-0.007136, -0.011959, 1.019095], ), ( [0.458064, 0.679578, -0.137642], [0.092785, 0.846313, 0.060902], [-0.007494, -0.016807, 1.024301], ), ( [0.385450, 0.769005, -0.154455], [0.100526, 0.829802, 0.069673], [-0.007442, -0.022190, 1.029632], ), ( [0.319627, 0.849633, -0.169261], [0.106241, 0.815969, 0.077790], [-0.007025, -0.028051, 1.035076], ), ( [0.259411, 0.923008, -0.182420], [0.110296, 0.804340, 0.085364], [-0.006276, -0.034346, 1.040622], ), ( [0.203876, 0.990338, -0.194214], [0.112975, 0.794542, 0.092483], [-0.005222, -0.041043, 1.046265], ), ( [0.152286, 1.052583, -0.204868], [0.114503, 0.786281, 0.099216], [-0.003882, -0.048116, 1.051998], ), ), dtype=np.float64, ) assert 0 <= severity <= 100 fraction = severity % 10 low = int(severity - fraction) // 10 high = low + 1 # assert low <= severity <= high low_matrix = MACHADO_ET_AL_MATRIX_protanomaly[low] if severity == 100: # Don't try interpolating between 100 and 110, there is no 110... return low_matrix high_matrix = MACHADO_ET_AL_MATRIX_protanomaly[high] return (1 - fraction / 10.0) * low_matrix + fraction / 10.0 * high_matrix
4,049
def derivative_compliance(coord, connect, E, v, rho, alpha, beta, omega_par, p_par, q_par, x_min_m, x_min_k, xval, disp_vector, lam): """ calculates the derivative of the compliance function. Args: coord (:obj:`numpy.array`): Coordinates of the element. connect (:obj:`numpy.array`): Element connectivity. E (:obj:`float`): Elastic modulus. v (:obj:`float`): Poisson's ratio. rho (:obj:`float`): Density. alpha (:obj:`float`): Damping coefficient proportional to mass. beta (:obj:`float`): Damping coefficient proportional to stiffness. omega_par (:obj:`float`): 2 * pi * frequency p_par (:obj:`float`): Penalization power to stiffness. q_par (:obj:`float`): Penalization power to mass. x_min_m (:obj:`float`): Minimum relative densities to mass. x_min_k (:obj:`float`): Minimum relative densities to stiffness. xval (:obj:`numpy.array`): Indicates where there is mass. disp_vector (:obj:`numpy.array`): Displacement vector. lam (:obj:`float`): Lambda parameter. Returns: Derivative of the compliance function. """ deriv_f = np.empty((len(connect), 1)) dofs = 2 ind_dofs = (np.array([dofs*connect[:,1]-1, dofs*connect[:,1], dofs*connect[:,2]-1, dofs*connect[:,2], dofs*connect[:,3]-1, dofs*connect[:,3], dofs*connect[:,4]-1, dofs*connect[:,4]], dtype=int)-1).T for el in range(len(connect)): Ke, Me = fc.matricesQ4(el, coord, connect, E, v, rho) ind = ind_dofs[el, :] dKe = p_par * (xval[el]**(p_par - 1))*(1-x_min_k) * Ke dCe = alpha * Me + beta * dKe if xval[el]>0.1: dMe = q_par * (xval[el]**(q_par - 1))*(1-x_min_m) * Me else: dMe = ((9*3.512e7*xval[el]**8 - 10*2.081e8*xval[el]**9)*(1-x_min_m) ) * Me dKed = dKe + omega_par * 1j * dCe - (omega_par**2) * dMe deriv_f[el, 0] = (-lam *(disp_vector[ind].reshape(1, 8)@dKed@disp_vector[ind].reshape(8, 1)))[0,0].real return deriv_f
4,050
def main():
    """Read input and print output cost of 3 entities generating costs"""
    cost_a, cost_b, cost_c = [int(i) for i in input().split()]
    parked = [0] * 102
    for _ in range(3):
        arrive, depart = [int(i) for i in input().split()]
        for i in range(arrive, depart):
            parked[i] += 1
    result = sum([1 for trucks in parked if trucks == 1]) * cost_a
    result += sum([2 for trucks in parked if trucks == 2]) * cost_b
    result += sum([3 for trucks in parked if trucks == 3]) * cost_c
    print(result)
4,051
def test_status_string(app, authed_client, status_code, status):
    """The status string should populate itself based on status code."""
    @app.route('/test_endpoint')
    def test_endpoint():
        return flask.jsonify('test'), status_code

    response = authed_client.get('/test_endpoint')
    assert response.get_json() == {'response': 'test', 'status': status}
4,052
def lanc(numwt, haf): """Generates a numwt + 1 + numwt lanczos cosine low pass filter with -6dB (1/4 power, 1/2 amplitude) point at haf Parameters ---------- numwt : int number of points haf : float frequency (in 'cpi' of -6dB point, 'cpi' is cycles per interval. For hourly data cpi is cph, Examples -------- >>> from datetime import datetime >>> import matplotlib.pyplot as plt >>> t = np.arange(500) # Time in hours. >>> h = 2.5 * np.sin(2 * np.pi * t / 12.42) >>> h += 1.5 * np.sin(2 * np.pi * t / 12.0) >>> h += 0.3 * np.random.randn(len(t)) >>> wt = lanc(96+1+96, 1./40) >>> low = np.convolve(wt, h, mode='same') >>> high = h - low >>> fig, (ax0, ax1) = plt.subplots(nrows=2) >>> _ = ax0.plot(high, label='high') >>> _ = ax1.plot(low, label='low') >>> _ = ax0.legend(numpoints=1) >>> _ = ax1.legend(numpoints=1) """ summ = 0 numwt += 1 wt = np.zeros(numwt) # Filter weights. ii = np.arange(numwt) wt = 0.5 * (1.0 + np.cos(np.pi * ii * 1. / numwt)) ii = np.arange(1, numwt) xx = np.pi * 2 * haf * ii wt[1:numwt + 1] = wt[1:numwt + 1] * np.sin(xx) / xx summ = wt[1:numwt + 1].sum() xx = wt.sum() + summ wt /= xx return np.r_[wt[::-1], wt[1:numwt + 1]]
4,053
def normalize_target_taxa(target_taxa):
    """
    Receives a list of taxa IDs and/or taxa names and returns a set of
    expanded taxids numbers
    """
    from ete3 import NCBITaxa
    ncbi = NCBITaxa()
    expanded_taxa = set()

    for taxon in target_taxa:
        taxid = ""
        try:
            taxid = int(taxon)
        except ValueError:
            taxid = ncbi.get_name_translator([taxon])[taxon][0]
        else:
            taxon = ncbi.get_taxid_translator([taxid])[taxid]

        species = ncbi.get_descendant_taxa(taxid, collapse_subspecies=False)
        for sp in species:
            expanded_taxa.add(sp)

    return expanded_taxa
4,054
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the Fully Kiosk Browser switch."""
    coordinator = hass.data[DOMAIN][config_entry.entry_id]

    async_add_entities([FullyScreenSaverSwitch(hass, coordinator)], False)
    async_add_entities([FullyMaintenanceModeSwitch(hass, coordinator)], False)
    async_add_entities([FullyKioskLockSwitch(hass, coordinator)], False)
    async_add_entities([FullyKioskMotionDetectionSwitch(hass, coordinator)], False)
4,055
def filled(a, value=None):
    """a as a contiguous numeric array with any masked areas replaced by value
    if value is None or the special element "masked", get_fill_value(a)
    is used instead.

    If a is already a contiguous numeric array, a itself is returned.

    filled(a) can be used to be sure that the result is numeric when passing
    an object a to other software ignorant of MA, in particular to numeric
    itself.
    """
    if isinstance(a, MaskedArray):
        return a.filled(value)
    elif isinstance(a, ndarray) and a.flags['CONTIGUOUS']:
        return a
    elif isinstance(a, types.DictType):
        return numeric.array(a, 'O')
    else:
        return numeric.array(a)
4,056
def get_generic_global(section, prop):
    """Generic getter for getting a property"""
    if section is None:
        raise GlobalPropertyError("Section cannot be null!")
    elif prop is None:
        raise GlobalPropertyError("Property cannot be null!")

    global_conf = configparser.ConfigParser()
    global_conf.read(DTF_GLOBAL_CONFIG)

    try:
        return global_conf.get(section, prop)
    except configparser.NoSectionError:
        raise GlobalPropertyError("Section not found: %s" % section)
    except configparser.NoOptionError:
        raise GlobalPropertyError("Property not found: %s" % prop)
4,057
def get_mspec_descriptors(mod, mod_lim=20, freq_lim=8000, n_mod_bin=20, n_freq_bin=20): """ Parameters ---------- mod : 2D Numpy array Modulation spectrogram mod_lim : int Upper limit of modulation frequency. The default is 20. freq_lim : int Upper limit of frequency. The default is 8000. n_mod_bin : int, optional Number of modulation frequency bins. The default is 20. n_freq_bin : int, optional Number of frequency bins. The default is 20. Returns ------- Modulation spectrogram descriptors: 1D numpy array """ n_fea = 8 #Number of features to compute mod = 10**(mod/10) #Convert energies in dB to original values n_mod_bin = n_mod_bin #Number of modulation frequency bins n_freq_bin = n_freq_bin #Number of conventional frequency bins mod = np.reshape(mod,(n_freq_bin, n_mod_bin)) #Reshape psd matrix ds_mod = np.empty((n_mod_bin,n_fea))*np.nan #Initialize a matrix to store descriptors in all bins ds_freq = np.empty((n_freq_bin,n_fea))*np.nan def get_subband_descriptors(psd, freq_range): #Initialize a matrix to store features ft=np.empty((8))*np.nan lo,hi = freq_range[0], freq_range[-1]#Smallest and largest value of freq_range #Centroid ft[0] = np.sum(psd*freq_range)/np.sum(psd) #Entropy ft[1]=-np.sum(psd*np.log(psd))/np.log(hi-lo) #Spread ft[2]=np.sqrt(np.sum(np.square(freq_range-ft[0])*psd)/np.sum(psd)) #skewness ft[3]=np.sum(np.power(freq_range-ft[0],3)*psd)/(np.sum(psd)*ft[2]**3) #kurtosis ft[4]=np.sum(np.power(freq_range-ft[0],4)*psd)/(np.sum(psd)*ft[2]**4) #flatness arth_mn=np.mean(psd)/(hi-lo) geo_mn=np.power(np.exp(np.sum(np.log(psd))),(1/(hi-lo))) ft[5]=geo_mn/arth_mn #crest ft[6]=np.max(psd)/(np.sum(psd)/(hi-lo)) #flux ft[7]=np.sum(np.abs(np.diff(psd))) return ft #Loop through all modulation frequency bands freq_bin_width = freq_lim/n_freq_bin mod_bin_width = mod_lim/n_mod_bin freq = np.arange(0,freq_lim,freq_bin_width)+freq_bin_width/2 #List of center values of frequency bins mod_freq = np.arange(0,mod_lim,mod_bin_width)+mod_bin_width/2 #List of center values of modulation frequency bins #Calculate features for each modulation frequency bin for mod_band in np.arange(n_mod_bin): ds_mod[mod_band,:] = get_subband_descriptors(mod[:,mod_band], freq) #Calculate features for each conventional frequency bin for freq_band in np.arange(n_freq_bin): ds_freq[freq_band,:] = get_subband_descriptors(mod[freq_band,:], mod_freq) return np.concatenate((np.reshape(ds_mod, (8*n_mod_bin)), np.reshape(ds_freq, (8*n_freq_bin))),axis=None)
4,058
def set_environment_variables_for_multi_node() -> None:
    """
    Sets the environment variables that PyTorch Lightning needs for multi-node training.
    """
    az_master_node = "AZ_BATCHAI_MPI_MASTER_NODE"
    master_addr = "MASTER_ADDR"
    master_ip = "MASTER_IP"
    master_port = "MASTER_PORT"
    world_rank = "OMPI_COMM_WORLD_RANK"
    node_rank = "NODE_RANK"

    if az_master_node in os.environ:
        # For AML BATCHAI
        os.environ[master_addr] = os.environ[az_master_node]
    elif master_ip in os.environ:
        # AKS
        os.environ[master_addr] = os.environ[master_ip]
    else:
        logging.info("No settings for the MPI central node found. Assuming that this is a single node training job.")
        return

    if master_port not in os.environ:
        os.environ[master_port] = "6105"

    if world_rank in os.environ:
        os.environ[node_rank] = os.environ[world_rank]  # node rank is the world_rank from mpi run

    for var in [master_addr, master_port, node_rank]:
        print(f"Distributed training: {var} = {os.environ[var]}")
4,059
def check_single_table_dataset(dataset, expected_table=None):
    """
    Raise if the given dataset is not a single-table dataset.

    Parameters
    ----------
    dataset: kartothek.core.dataset.DatasetMetadata
        The dataset to be validated
    expected_table: Optional[str]
        Ensure that the table in the dataset is the same as the given one.
    """
    if len(dataset.tables) > 1:
        raise TypeError(
            "Expected single table dataset but found dataset with tables: `{}`".format(
                dataset.tables
            )
        )
    if expected_table and dataset.tables != [expected_table]:
        raise TypeError(
            "Unexpected table in dataset:\nFound:\t{}\nExpected:\t{}".format(
                dataset.tables, expected_table
            )
        )
4,060
def mustachify( file, mustache_file="mustache.png", rotation=True, perspective=False, # TODO add perspective transformation modelsize="small", ): """ Pastes a mustache on each face in the image file :param file: image file name or file object to load :param mustache_file: file pointer to mustache png :return: PIL image object with mustache on each face """ if modelsize not in ("small", "large"): raise ValueError("Landmarks model should be \"small\" or \"large\"") # load file to img img_array = load_image_file(file) # get landmarks of all faces locations = face_locations(img_array, number_of_times_to_upsample=1) landmarks = face_landmarks(img_array, face_locations=None, model=modelsize) # create PIL object for img and drawing img = Image.fromarray(img_array) draw = ImageDraw.Draw(img) # load mustache mustache = Image.open(mustache_file) # loop over each face for landmark in landmarks: mask = rotate(img=mustache, landmark=landmark) mask = scale(img=mask, landmark=landmark, scale=1.3) mask = removePadding(mask) if modelsize=="small": nose = landmark["nose_tip"][0] elif modelsize=="large": nose = landmark["nose_tip"][2] midpoint = (round(mask.size[0]/2), round(mask.size[1]/2.8)) position = (nose[0] - midpoint[0], nose[1] - midpoint[1]) img.paste(mask, position, mask) return img
4,061
def get_available_gpus():
    """Return a list of available GPUs with their names"""
    cmd = 'nvidia-smi --query-gpu=name --format=csv,noheader'
    process = subprocess.Popen(cmd, shell=True,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    stdout, _ = process.communicate()
    if process.returncode == 0:
        return stdout.decode().splitlines()
    return []
4,062
def monitor_promo(bot, job): """ Job to Send Promo Message """ msg = promo_alert.get_new_codes_message() if msg is None: print('No new promos') else: text_bot = ['Uber poked me privately and said this :wink:', 'I found this promo code while I was in my ActiveWear! :stuck_out_tongue:', 'Quick apply the code! Later run out dont cry :sunglasses:', 'Breaking News Brought to you by ShiokBot!', ] all_users = promo_alert.get_all_users() to_send = chunks(all_users, 10) for group in to_send: for user in group: try: bot.sendMessage(int(user), text=emojize(random.choice( text_bot), use_aliases=True), parse_mode='HTML') bot.sendMessage(int(user), text=msg, parse_mode='HTML') except: print("Error! Sending Message to " + str(user)) sleep(1.2)
4,063
def register_image_array(img, img_name, img_desc, project_id, sample_id, usr, pwd, host, port=4064): """ This function imports a 5D (time-points, channels, x, y, z) numpy array of an image to an omero server using the OMERO Python bindings Example: register_image_array(hypercube, "tomo_0", "this is a tomogram", "project_x", "sample_y", "joe_usr", "joe_pwd", "192.168.2.2") Args: file_path (string): the path to the fastq file to validate project_id (string): the corresponding project ID in openBIS server sample_id (string): the corresponding sample ID in openBIS server usr (string): username for the OMERO server pwd (string): password for the OMERO server host (string): OMERO server address port (int): OMERO server port Returns: int: newly generated omero ID for registered image array """ img_id = -1 save_flag = 0 conn = omero_connect(usr, pwd, host, str(port)) for project in conn.getObjects("Project"): if project.getName() == project_id: for dataset in project.listChildren(): if dataset.getName() == sample_id: img_id = create_array(conn, img, img_name, img_desc, dataset) save_flag = 1 break if save_flag == 1: break return int(img_id)
4,064
def fixture_result():
    """structure used to hold details of the intermediate result at each stage of the test"""
    fixture = {SUBARRAY_USED: 'ska_mid/tm_subarray_node/1',
               SCHEDULING_BLOCK: None,
               STATE_CHECK: None}
    yield fixture
    # teardown
    end(fixture)
4,065
def count_symbols(val):
    """
    Counts the number of symbols in a string.

    A symbol is defined as any character that is neither a lowercase letter,
    uppercase letter or digit.

    Args:
        val (str): The string to count symbols in.

    Returns:
        int: The number of symbols in the string.
    """
    return sum(1 for c in val if is_symbol(c))
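count_symbols delegates to an is_symbol helper that is not shown here. The sketch below is an assumed stand-in that matches the docstring's definition, plus a usage example; it is not necessarily the project's actual implementation.

def is_symbol(c):
    """Assumed helper: True if c is neither a lowercase letter, uppercase letter nor digit."""
    return not (c.islower() or c.isupper() or c.isdigit())

print(count_symbols("a1-b2_c3!"))  # -> 3 ('-', '_' and '!')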
4,066
def arctan(dy, dx):
    """
    Returns the arctan of angle between 0 and 2*pi
    """
    arc_tan = math.atan2(dy, dx)
    if arc_tan < 0:
        arc_tan = arc_tan + 2 * np.pi
    return arc_tan
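A couple of illustrative values (math and numpy are assumed imported, as in the function body):

print(arctan(1, 1))   # ~0.785, i.e. pi/4
print(arctan(-1, 1))  # ~5.498, i.e. 7*pi/4, wrapped into [0, 2*pi)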
4,067
def meshsize(mesh: dolfin.Mesh, kind: str = "cell") -> dolfin.MeshFunction: """Return the local meshsize `h` as a `MeshFunction` on cells or facets of `mesh`. The local meshsize is defined as the length of the longest edge of the cell/facet. kind: "cell" or "facet" """ if kind not in ("cell", "facet"): raise ValueError(f"`kind` must be 'cell' or 'facet', got {type(kind)} with value {kind}") dim = mesh.topology().dim() if kind == "cell": entities = dolfin.cells(mesh) fdim = dim else: # kind == "facet": entities = dolfin.facets(mesh) fdim = dim - 1 f = dolfin.MeshFunction("double", mesh, fdim) f.set_all(0.0) if kind == "cell": for cell in entities: f[cell] = cell.h() else: # facets have no `.h` def vertices_as_array(entity): return [vtx.point().array() for vtx in dolfin.vertices(entity)] def euclidean_distance(vtxpair): assert len(vtxpair) == 2 dx = vtxpair[0] - vtxpair[1] return np.sqrt(np.sum(dx**2)) for entity in entities: edges = dolfin.edges(entity) vtxpairs = [vertices_as_array(edge) for edge in edges] edge_lengths = [euclidean_distance(vtxpair) for vtxpair in vtxpairs] f[entity] = max(edge_lengths) return f
4,068
def write_csv_headers(logfile):
    """Write header lines in the CSV file with the schema of the messages involved."""
    for c in MESSAGE_CLASSES:
        header_prefix = ["", c.__name__]
        header_elements = sorted(c.__slots__)
        logfile.write(",".join(
            ['"%s"' % h for h in (header_prefix + header_elements)]) + "\n")
4,069
def dep_graph_parser_parenthesis(edge_str):
    """Given a string representing a dependency edge in the 'parenthesis'
    format, return a tuple of (parent_index, edge_label, child_index).

    Args:
        edge_str: a string representation of an edge in the dependency tree,
            in the format edge_label(parent_word-parent_index, child_word-child_index)

    Returns:
        tuple of (parent_index, edge_label, child_index)
    """
    tokens = edge_str.split("(")
    label = tokens[0]
    tokens = tokens[1].split(", ")
    parent = int(tokens[0].split("-")[-1]) - 1
    child = int(",".join(tokens[1:]).split("-")[-1][:-1]) - 1
    return (parent, label, child)
4,070
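# A worked example for dep_graph_parser_parenthesis above (assumed in scope):
# word indices in the input are 1-based, the returned indices are 0-based.
assert dep_graph_parser_parenthesis("nsubj(ran-2, dog-1)") == (1, "nsubj", 0)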
def multipass(args) -> None: """Install Multipass. :param args: A Namespace object containing parsed command-line options. """ if args.install: if is_debian_series(): cmd = f"{args.prefix} snap install multipass --classic" run_cmd(cmd) elif is_macos(): cmd = "brew cask install multipass" run_cmd(cmd) elif is_fedora_series(): pass elif is_win(): pass if args.config: pass if args.uninstall: if is_debian_series(): cmd = f"{args.prefix} snap uninstall multipass" run_cmd(cmd) elif is_macos(): run_cmd("brew cask uninstall multipass") elif is_fedora_series(): pass
4,071
def transfer_to_infeed(value, device_ordinal=0): """Transfers the given value into the XLA infeed queue. XLA's infeed queue is a single queue that feeds the "XLA virtual machine" with a totally ordered stream of values. This is dequeued from XLA computations via the Infeed() operation. Args: value: the value that the caller would like to enqueue into the XLA infeed queue device_ordinal: the device to infeed the value to. Each device has a distinct infeed queue. """ # TODO(phawkins): support non-default backends. backend = get_local_backend() backend.client.TransferToInfeed(value, device_ordinal)
4,072
def measure_crypts_props_no_paneth(crypt_objs, label_mask, edu_objs, df, row, col, fld): """Measure crypt level properties for all crypts in image Args: crypt_objs (array): labeled cell objects (e.g. nuclei segmentation) label_mask (array): labeled crypt objects edu_objs (list): ids of cell objects positive for EdU df (dataframe): dataframe of crypt measurements in this well - add results to dataframe row (char): row of current well col (int): column of current well fld (int): field of current frame Returns: dataframe: dataframe with measurements from this field added """ # list of crypt labels crypt_labels = nonzero_unique(label_mask) for l in crypt_labels: # measure properties for one crypt crypt_mask = get_object(label_mask, l) objs = mask_objects(crypt_objs, crypt_mask, mask_val=l) crypt_props = measure.regionprops(crypt_mask)[0] # add properties to dataframe df['num_cells'].append(len(nonzero_unique(objs))) df['num_edu'].append(count_stained_objs(objs, edu_objs)) df['nuc_area'].append(crypt_props.area) df['eccentricity'].append(crypt_props.eccentricity) df['solidity'].append(crypt_props.solidity) df['row'].append(row) df['col'].append(col) df['fld'].append(fld) return df
4,073
def intersection_angle(m1, m2): """ Computes intersection angle between two slopes. """ return math.degrees(math.atan((m2-m1) / (1+m1*m2)))
4,074
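# A worked example for intersection_angle above (assumed in scope): a
# horizontal line (slope 0) and a line of slope 1 intersect at 45 degrees.
# Note the formula divides by (1 + m1*m2), so perpendicular lines
# (m1 * m2 == -1) are not handled.
import math
assert math.isclose(intersection_angle(0, 1), 45.0)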
def setup_console_logging(verbosity: int = logging.INFO) -> None:
    """
    :param int verbosity: Logging level applied to the console and syslog
        handlers, e.g. logging.INFO or logging.DEBUG
    """
    settings.LOGGING["handlers"]["console"]["level"] = verbosity
    settings.LOGGING["handlers"]["syslog"]["level"] = verbosity
    logging.config.dictConfig(settings.LOGGING)
4,075
def create_zappa_project( project_name, stack_name, session, client, username, email, password ): """Create the Zappa project.""" aws_rds_host = get_aws_rds_host(stack_name, session) with open('.env', 'a') as file: file.write('AWS_RDS_HOST={}\n'.format(aws_rds_host)) aws_lambda_host = deploy_zappa(project_name, client) with open('.env', 'a') as file: file.write('AWS_LAMBDA_HOST={}\n'.format(aws_lambda_host)) update_zappa(project_name, client) click.echo( 'Run initial Django migration for Zappa deployment...', nl=False ) client.containers.run( '{}_web:latest'.format(project_name), '/bin/bash -c "source ve/bin/activate && zappa manage dev migrate"', remove=True, volumes={ Path.cwd(): {'bind': '/var/task', 'mode': 'rw'}, '{}/.aws'.format(Path.home()): { 'bind': '/root/.aws', 'mode': 'ro' } } ) click.secho(' done', fg='green') click.echo( 'Create Django superuser {} for Zappa...'.format(username), nl=False ) try: django_command = '''from django.contrib.auth import get_user_model; \ User = get_user_model(); \ User.objects.create_superuser(\\"{}\\", \\"{}\\", \\"{}\\")'''.format( username, email, password ) bash_command = 'source ve/bin/activate \ && zappa invoke --raw dev "{}"'.format(django_command) zappa_command = "/bin/bash -c '{}'".format(bash_command) client.containers.run( '{}_web:latest'.format(project_name), zappa_command, remove=True, volumes={ Path.cwd(): {'bind': '/var/task', 'mode': 'rw'}, '{}/.aws'.format(Path.home()): { 'bind': '/root/.aws', 'mode': 'ro' } } ) click.secho(' done', fg='green') except docker.errors.ContainerError: pass click.echo('Running collectstatic for Zappa deployment...', nl=False) client.containers.run( '{}_web:latest'.format(project_name), '/bin/bash -c "source ve/bin/activate \ && python manage.py collectstatic --noinput"', environment={'DJANGO_ENV': 'aws-dev'}, remove=True, volumes={ Path.cwd(): {'bind': '/var/task', 'mode': 'rw'}, '{}/.aws'.format(Path.home()): { 'bind': '/root/.aws', 'mode': 'ro' } } ) click.secho(' done', fg='green') return(aws_lambda_host)
4,076
def test_retrieve_all(database_connection: mysql.connector.connect, print_response: bool = False): """Testing response from info.retrieve_all""" guests = info.retrieve_all(database_connection) assert guests is not None if print_response: print(json.dumps(guests, indent=2))
4,077
def erase_create_HDF(filename):
    """Create and return a new HDF5 file with the given filename, erasing the file if it already exists.

    See https://github.com/NelisW/pyradi/blob/master/pyradi/hdf5-as-data-format.md
    for more information on using HDF5 as a data structure.

    open for writing, truncate if exists
    https://h5py.readthedocs.io/en/stable/high/file.html#opening-creating-files

    Args:
        | filename (string): name of the file to be created

    Returns:
        | HDF5 file.

    Raises:
        | No exception is raised.

    Author: CJ Willers
    """
    if os.path.isfile(filename):
        os.remove(filename)
    f = h5py.File(filename,'w')
    return f
4,078
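# A minimal usage sketch for erase_create_HDF above; the file name and dataset
# are illustrative only.
f = erase_create_HDF('demo.hdf5')
f.create_dataset('x', data=[1, 2, 3])
f.close()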
def test_output_group_with(temp_factory): """Test option group_with in output statement""" temp_factory("a.txt", "b.txt") for ofile in ["a.txt1", "b.txt2"]: if file_target(ofile).exists(): file_target(ofile).unlink() # # string input execute_workflow(r""" [0] files = ['a.txt', 'b.txt'] vars = [1, 2] input: files, group_by=1 output: f"{_input}.bak", group_with=dict(_vars=vars[_index]) run: expand=True touch {_output} [1] assert(_vars == _index + 1) """) for ofile in ["a.txt.bak", "b.txt.bak"]: assert file_target(ofile).target_exists("target") file_target(ofile).unlink() # # list input execute_workflow(r""" [0] files = ['a.txt', 'b.txt'] vars = [1] vars2 = ['a'] input: files, group_by=2 output: f"{_input[0]}1", group_with=('vars', 'vars2') run: expand=True touch {_output} [1] assert(_vars == 1) assert(_input._vars2 == 'a') """) for ofile in ["a.txt1"]: assert file_target(ofile).target_exists("target") file_target(ofile).unlink() # # dict input execute_workflow(r""" [0] files = ['a.txt', 'b.txt'] input: files, group_by=2 output: f"{_input[0]}.bak", group_with={'var': [1], 'var2': ['a']} run: expand=True touch {_output} [1] assert(var == 1) assert(var2 == 'a') """) for ofile in ["a.txt.bak"]: assert file_target(ofile).target_exists("target") file_target(ofile).unlink()
4,079
def cast2dtype(segm): """Cast the segmentation mask to the best dtype to save storage. """ max_id = np.amax(np.unique(segm)) m_type = getSegType(int(max_id)) return segm.astype(m_type)
4,080
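# cast2dtype above relies on a helper getSegType that is not shown in this
# snippet; a plausible stand-in, used purely for illustration, picks the
# smallest unsigned integer dtype that can hold the largest label id.
import numpy as np

def getSegType(max_id):
    for dtype in (np.uint8, np.uint16, np.uint32):
        if max_id <= np.iinfo(dtype).max:
            return dtype
    return np.uint64

segm = np.array([[0, 1], [2, 300]])
assert cast2dtype(segm).dtype == np.uint16   # 300 does not fit in uint8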
def get_renders_df(product_df, order_df, user_df, address_df, num_days=90): """ Renders - All requested renders from order, both customer and tester """ renders_df = pd.merge(product_df, order_df, how='left', on='order_id', suffixes=(None, '_order')) renders_df = pd.merge(renders_df, user_df, how='left', left_on='user_id', right_on='id', suffixes=(None, '_user')) renders_df = pd.merge(renders_df, address_df, how='left', on='user_id', suffixes=(None, '_address')) renders_df = renders_df.rename(columns={'data_product': 'data', 'timestamp_product': 'timestamp'}) renders_df = renders_df[renders_df['is_in_cart'] == True] renders_df = renders_df.dropna(subset=['state']) renders_df['product_data'] = renders_df['data'].apply(lambda x: x.get('data', {})) return renders_df
4,081
def check_sparsity_level(model, config, ref_sparsity_level):
    """ Check that the sparsity level of the model is equal to the reference sparsity level.
    """
    sparsity_algo = MagnitudeSparsity(config, None)
    all_weights_nodes = sparsity_algo._get_all_weights_nodes(model)
    all_weights = [get_node_value(w_node).flatten() for w_node in all_weights_nodes]
    all_weights = np.concatenate(all_weights)
    sparsity_level = np.sum(all_weights == 0) / len(all_weights)

    return np.isclose(sparsity_level, ref_sparsity_level)
4,082
def dig_single_site(basedir):
    """
    Crappy little function to dig into specific sites and look at the
    individual 1-month spectra. Mostly a scratchpad function, as what needs
    investigating varies.
    """
    basedir = Path(basedir)
    files = basedir.rglob("Level3/**/*RESMIN*.npy")
    # the nyquist frequency
    max_freq = 1 / (3 * 60)
    # min freq corresponds to a 48 hour period
    min_freq = 1 / (48 * 60 * 60)
    # frequency step, use 2x the highest frequency step I've seen
    # to avoid aliasing
    interp_freq_step = 1.87e-07
    freqs = np.arange(min_freq, max_freq, interp_freq_step)
    max_val = 0
    mask = (freqs > 0.003) * (freqs < 0.004)
    for f in files:
        data = np.load(f)
        psd = do_fft_on_data(data)
        plt.plot(freqs, psd)
        if np.std(psd[mask]) > max_val:
            max_val = np.std(psd[mask])
            print(f)
            print(np.std(psd[mask]))
4,083
def send_register_active_email(to_email, username, token):
    """Send the account activation email."""
    # compose the email content
    subject = '天天生鲜欢迎信息'
    message = ''
    sender = settings.EMAIL_FROM
    receiver = [to_email]
    html_message = """
        <h1>%s, 欢迎您成为天天生鲜注册会员</h1>
        请点击以下链接激活您的账号<br/>
        <a href="http://127.0.0.1:8000/user/active/%s">http://127.0.0.1:8000/user/active/%s</a>
    """ % (username, token, token)

    # send the activation email
    # send_mail(subject='email subject', message='email body', from_email='sender address', recipient_list='list of recipient addresses')
    import time
    time.sleep(5)
    send_mail(subject, message, sender, receiver, html_message=html_message)
4,084
def get_characters_character_id_contacts(*, character_id, token, if_none_match=None, page='1'): """ :param character_id: An EVE character ID :param if_none_match: ETag from a previous request. A 304 will be returned if this matches the current ETag :param page: Which page of results to return :param token: Access token to use if unable to set a header Return contacts of a character --- Alternate route: `/dev/characters/{character_id}/contacts/` Alternate route: `/latest/characters/{character_id}/contacts/` --- This route is cached for up to 300 seconds """ ESI_request.request(character_id=character_id, if_none_match=if_none_match, page=page, token=token, data_source='tranquility', version='v2', HTTP_method='GET', path=f'/characters/{character_id}/contacts/')
4,085
def test__energy(): """ test the energy read/write functions """ ref_ene = -75.00613628303537 ene_file_name = autofile.name.energy('test') ene_file_path = os.path.join(TMP_DIR, ene_file_name) ene_str = autofile.write.energy(ref_ene) assert not os.path.isfile(ene_file_path) autofile.write_file(ene_file_path, ene_str) assert os.path.isfile(ene_file_path) ene_str = autofile.read_file(ene_file_path) ene = autofile.read.energy(ene_str) assert numpy.isclose(ref_ene, ene)
4,086
def new( name: str, data: typing.Optional[bytes] = b"", digest_size: typing.Optional[int] = None, *, custom: typing.Optional[bytes] = None, # cshakes, kangarootwelve key: typing.Optional[bytes] = None, # for blakes ) -> Hash: """ Instantiate a hash object. Args: name: The name of the hash function. data: The initial chunk of message to feed to hash. Note that for ``TupleHash`` variants, even an empty byte string changes its internal state. digest_size: The length of the digest size. Must be supplied if the hash function supports it. Keyword Args: custom: A customization string. Can be supplied for hash functions that support domain separation. key: A key that is used to compute the MAC. Can be supplied for hash functions that support working as cryptographic MAC. Raises: KeyError: If ``name`` is not a hash function name. ValueError: If ``digest_size`` is required but not provided. """ return Hash(name, data, digest_size=digest_size, custom=custom, key=key)
4,087
def recursively_save_dict_contents_to_group(h5file, path, dic):
    """
    Recursively save the contents of a (possibly nested) dict to an HDF5 group.

    Arrays, numpy scalars, strings and bytes are written as datasets at
    ``path + key``; nested dicts are written as sub-groups. Any other value
    type raises a ValueError.
    """
    for key, item in dic.items():
        if isinstance(item, (np.ndarray, np.int64, np.dtype(float).type, str, bytes)):
            h5file[path + key] = item
        elif isinstance(item, dict):
            recursively_save_dict_contents_to_group(h5file, path + key + '/', item)
        else:
            raise ValueError('Cannot save %s type'%type(item))
4,088
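# A usage sketch for recursively_save_dict_contents_to_group above, assuming
# h5py is available; the file name and dictionary contents are illustrative.
import h5py
import numpy as np

data = {'scalar': np.int64(3), 'nested': {'array': np.arange(4), 'name': 'demo'}}
with h5py.File('example.h5', 'w') as h5file:
    recursively_save_dict_contents_to_group(h5file, '/', data)
    # produces datasets /scalar, /nested/array and /nested/name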
def stringify_addresses(addresses): """ Converts a list of addresses into a string in the `"John Doe" <[email protected]>, "Jane" <[email protected]>"` format, which can be directly used in the headers of an email. Parameters ---------- addresses : (str or (str, str)) or list of (str or (str, str)) A single address or a list of addresses which is to be converted into a single string. Each element can be either an email address or a tuple of a name and an email address. Returns ------- str The address(es) as a single string which can be directly used in the headers of an email. """ if isinstance(addresses, list): addresses = [stringify_address(address) for address in addresses] return ', '.join(addresses) else: return stringify_address(addresses)
4,089
def validator_map_size(string): """ Validator for map size input Raises InputError with error description if string is not valid :param string: String to check :return: Bool, if success """ result = False if string.isdigit(): size = int(string) if 5 <= size <= 100: result = True else: raise InputError("Unacceptable map size! Try again") else: raise InputError("Input is not integer! Try again") return result
4,090
def get_short_token(app_id, app_secret, redirect_url, auth_code): """Get a short-lived access token.""" url = f"{OAUTH_URL}/access_token" payload = { "client_id": app_id, "client_secret": app_secret, "grant_type": "authorization_code", "redirect_uri": redirect_url, "code": auth_code, } resp = requests.post(url, data=payload).json() return resp["access_token"]
4,091
def random_chinese_name():
    """Generate a random Chinese name of two or three characters.

    Returns:
        str: the randomly generated name
    """
    long = random.randint(2, 3)
    first_name = random.choice(FIRST_NAME)
    last_name = random.choice(LAST_NAME) if long == 2 else "{}{}".format(random.choice(LAST_NAME), random.choice(LAST_NAME))
    name = first_name + last_name
    return name
4,092
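# random_chinese_name above depends on module-level FIRST_NAME and LAST_NAME
# lists that are not shown here; the tiny lists below are illustrative
# stand-ins (surnames in FIRST_NAME, given-name characters in LAST_NAME).
import random

FIRST_NAME = ["王", "李", "张"]
LAST_NAME = ["伟", "娜", "敏"]

print(random_chinese_name())   # e.g. "李娜" or "张伟敏"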
def generate_json_with_incorrect_prediction_value(features_definition: dict):
    """ Generates a list of dictionaries whose keys are taken from the given
    features_definition; in each dictionary exactly one key is assigned a
    value that is not allowed by the definition
    """
    mock_requests = []
    def_keys = list(features_definition.keys())
    for def_key in def_keys:
        mock_request = {key: list(value.keys())[0] for key, value in features_definition.items()}
        # Replace this key's valid value with an invalid prediction value
        mock_request[def_key] = 'q'
        mock_requests.append(mock_request)
    return mock_requests
4,093
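# A worked example for generate_json_with_incorrect_prediction_value above
# (assumed in scope): each generated request replaces exactly one feature's
# allowed value with the invalid placeholder 'q'.
definition = {"age": {"30": None}, "sex": {"m": None}}
assert generate_json_with_incorrect_prediction_value(definition) == [
    {"age": "q", "sex": "m"},
    {"age": "30", "sex": "q"},
]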
def extract_text(file: UploadFile = File(...), lang: str = "eng", text_only: bool = False, custom_config: str = None): """ :param file: :param lang: available: deu, eng :return: """ filepath = "temp/" + file.filename with file.file: with open(filepath, "wb") as temp_file: temp_file.write(file.file.read()) # preprocess_image(filepath) if custom_config is None: custom_config = '--oem 3' if text_only: output = bytes(pytesseract.image_to_string(filepath, lang=lang, config=custom_config), encoding="utf-8") response = PlainTextResponse(content=output) else: output = pytesseract.image_to_pdf_or_hocr(filepath, lang=lang, extension='hocr', config=custom_config) extracted = xmltodict.parse(output) response = hocr_to_simple_json(extracted, lang) os.remove(filepath) return response
4,094
def split_data( args, data_paths: t.List[Path], val_ratio: float = 0.20, test_ratio: float = 0.10, random_state: int = 42, ) -> (t.List[str], t.List[str], t.List[str]): """ Split the data into train, val and test and save the splits to file. Args: args data_paths: list of list of scan paths in H5 file e.g. [ [scan1_FLAIR, scan1_T1, scan1_T2] ... ] val_ratio: validation set ratio test_ratio: test set ratio random_state: random state to be passed Returns: train_paths: list of scan paths for training val_paths: list of scan paths for validation test_paths: list of scan paths for testing """ test_size = int(len(data_paths) * test_ratio) val_size = int(len(data_paths) * val_ratio) train_size = len(data_paths) - val_size - test_size data_paths = np.asarray(data_paths) # shuffle indexes rng = np.random.default_rng(random_state) indexes = np.arange(len(data_paths)) rng.shuffle(indexes) # split data into train validation and test set train_paths = data_paths[indexes[:train_size]] val_paths = data_paths[indexes[train_size:train_size + val_size]] test_paths = data_paths[indexes[train_size + val_size:]] if not args.merge_scan_type: # treat each scan type separately train_paths = train_paths.flatten() val_paths = val_paths.flatten() test_paths = test_paths.flatten() return train_paths.tolist(), val_paths.tolist(), test_paths.tolist()
4,095
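# A minimal sketch of calling split_data above; the args object only needs the
# merge_scan_type flag used inside the function, and the scan paths are
# illustrative. With 10 scans, 20% validation and 10% test give a 7/2/1 split.
from argparse import Namespace
from pathlib import Path

paths = [[Path(f"scan{i}_FLAIR.h5"), Path(f"scan{i}_T1.h5")] for i in range(10)]
train, val, test = split_data(Namespace(merge_scan_type=True), paths,
                              val_ratio=0.20, test_ratio=0.10)
assert (len(train), len(val), len(test)) == (7, 2, 1)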
def check_chains(sampler, pos, theta_lb, theta_ub, mode_list=['bounds']):
    """ check chains
    1> reset out-of-bound chains
    2> reset all chains to max likelihood neighbours
    """
    mode_all = ['bounds', 'reset_all']
    for mode in mode_list:
        assert mode in mode_all

    n_walkers, n_step, n_dim = sampler.chain.shape

    # state of each chain
    state = np.ones((n_walkers,), dtype=bool)

    # the best position
    pos_best = sampler.flatchain[np.argsort(sampler.flatlnprobability)[-1]]

    # 'bounds' : chain pos should be between theta_lb, theta_ub
    if 'bounds' in mode_list:
        state = np.logical_and(state, np.array(
            [theta_between(pos[i], theta_lb, theta_ub) for i in
             range(n_walkers)]))

    # 'reset_all' : reset all chains
    if 'reset_all' in mode_list:
        state = np.logical_and(state, np.zeros((n_walkers,), dtype=bool))

    # determine new pos
    pos_new = []
    for i, state_ in enumerate(state):
        if not state_:
            # state_ = False, reset
            pos_new.append(pos_best + np.random.uniform(-1, 1, size=pos_best.shape) * 1.e-3)
        else:
            pos_new.append(pos[i])
    return np.array(pos_new), state, pos_best
4,096
def get_index_train_test_path(_DATA_DIRECTORY_PATH, split_num, train = True):
    """
       Method to generate the path containing the training/test split for the given
       split number (generally from 1 to 20).
       @param split_num      Split number for which the data has to be generated
       @param train          Is true if the data is training data. Else false.
       @return path          Path of the file containing the required data
    """
    if train:
        return _DATA_DIRECTORY_PATH + "index_train_" + str(split_num) + ".txt"
    else:
        return _DATA_DIRECTORY_PATH + "index_test_" + str(split_num) + ".txt"
4,097
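# The paths produced by get_index_train_test_path above (assumed in scope):
assert get_index_train_test_path("data/", 3) == "data/index_train_3.txt"
assert get_index_train_test_path("data/", 3, train=False) == "data/index_test_3.txt"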
def read_output(): """Reads the complex values from output file sink generated by gnuradio expt 2""" complex_output = np.fromfile(file_sink_complex_expt2, dtype = 'complex64').reshape(-1,1) plt.figure() plt.plot(complex_output[11:18000].real) plt.plot(complex_output[11:18000].imag) plt.savefig('complex_output.png') plt.close('all') return complex_output
4,098
def sha3_256Validator(value): """Predicate that checks if the given value seems to be SHA-3 256 hash.""" # check if the value has the expected type stringTypeValidator(value) # SHA-3 256 hash has 64 hexadecimal characters if not re.fullmatch(r"^[a-fA-F0-9]{64}$", value): raise Invalid("the value '{value}' does not seem to be SHA-3 256 hash".format(value=value))
4,099
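# A quick check of sha3_256Validator above: a digest produced by hashlib has 64
# hex characters and passes validation; stringTypeValidator and Invalid are
# assumed to be in scope as in the original module.
import hashlib

digest = hashlib.sha3_256(b"hello").hexdigest()
sha3_256Validator(digest)   # no exception raised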