def hash_bower_component(hash_obj, path):
    """Hash the contents of a bower component directory.

    This is a stable hash of a directory downloaded with `bower install`,
    minus the .bower.json file, which is autogenerated each time by bower.
    Used in lieu of hashing a zipfile of the contents, since zipfiles are
    difficult to hash in a stable manner.

    Args:
      hash_obj: an open hash object, e.g. hashlib.sha1().
      path: path to the directory to hash.

    Returns:
      The passed-in hash_obj.
    """
    if not os.path.isdir(path):
        raise ValueError('Not a directory: %s' % path)
    path = os.path.abspath(path)
    for root, dirs, files in os.walk(path):
        dirs.sort()
        for f in sorted(files):
            if f == '.bower.json':
                continue
            p = os.path.join(root, f)
            hash_obj.update(p[len(path)+1:].encode("utf-8"))
            hash_obj.update(open(p, "rb").read())
    return hash_obj
1,100
def align_jp_and_en_boxes(pd_results) -> pd.DataFrame:
    """Boxes are not returned in page order, so the Japanese and English
    boxes must be matched heuristically based on their location on the page.
    """
    japanese_results = pd.DataFrame.copy(
        pd_results[pd_results.language == "jp"]).reset_index()
    english_results = pd.DataFrame.copy(
        pd_results[pd_results.language == "en"]).reset_index()
    japanese_vals = japanese_results[["left", "top"]].values
    english_vals = english_results[["left", "top"]].values
    # Match each English box to the nearest Japanese box by (left, top) position.
    n = NearestNeighbors(n_neighbors=1)
    n.fit(japanese_vals)
    dis, index = n.kneighbors(english_vals)
    english_results["boxID"] = index.reshape(-1)
    return japanese_results.append(english_results).reset_index()
1,101
def export_videos(nusc: NuScenes, out_dir: str):
    """Export videos of the camera images in each scene."""
    # Load NuScenes class
    scene_tokens = [s['token'] for s in nusc.scene]

    # Create output directory
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)

    # Write videos to disk
    for scene_token in scene_tokens:
        scene = nusc.get('scene', scene_token)
        print('Writing scene %s' % scene['name'])
        out_path = os.path.join(out_dir, scene['name']) + '.avi'
        if not os.path.exists(out_path):
            nusc.render_scene(scene['token'], out_path=out_path)
1,102
def productivity_flag():
    """
    Real Name: b'Productivity Flag'
    Original Eqn: b'1'
    Units: b'Dmnl'
    Limits: (None, None)
    Type: constant

    b''
    """
    return 1
1,103
def create_cartpole_network(hidden_layers=2, neurons=56): """ Network that can solve gyms 'CartPole-v1' environment. """ net = Sequential() net.add(Dense( neurons, input_shape=(4,), kernel_regularizer=l2(0.001), kernel_initializer=GlorotNormal(), activation='relu'), ) net.add(Dropout(0.1)) for n in range(hidden_layers): net.add(Dense( neurons, kernel_regularizer=l2(0.001), kernel_initializer=GlorotNormal(), activation='relu'), ) net.add(Dropout(0.1)) net.add(Dense(2, activation='relu')) return net
1,104
def askdelsnat(client, mapping):
    """ask static NAT delete"""
    if mapping.proto == 1:
        proto = ' udp '
    else:
        proto = ' tcp '
    text = 'nat ' + client.ipv6 + proto + mapping.src + \
           ' ' + str(mapping.sport)
    ask('delete ' + text)
    syslog.syslog(syslog.LOG_NOTICE, 'del ' + text)
1,105
def format_as_rfc2822(*args, **kwargs):
    """Alias of ``format_as_rss()``."""
    return format_as_rss(*args, **kwargs)
1,106
def train_model_exponentially(train_images, train_labels, parts, exponent): """ Trains a model incrementally, using training data partitions that increase exponentially, and exports it. :param train_images: :param train_labels: :param parts: :param exponent: :return: The final model """ normal_model = model_handler.cnn_model() # prepare data train_images, train_labels = data_manipulator.prepare_visual_data(train_images, train_labels) # split training data to partitions partitioned_train_images = partition_data_exponentially(train_images, parts, exponent) partitioned_train_labels = partition_data_exponentially(train_labels, parts, exponent) # train model for part in range(parts): normal_model.fit(partitioned_train_images[part], partitioned_train_labels[part], epochs=5, batch_size=64) model_handler.save_model(normal_model, 'normal_model_exponential_part_' + str(part + 1) + '_of_' + str(parts)) return normal_model
1,107
def get_config_of(tests, test_name):
    """ Find generic values of test """
    for test in tests:
        if test.name == test_name:
            try:
                return test._test_case._run._config  # pylint: disable=protected-access
            except AttributeError:
                return test._run._config  # pylint: disable=protected-access
    raise KeyError(test_name)
1,108
def transmit_format(func): """Wrapper for dataset transforms that recreate a new Dataset to transmit the format of the original dataset to the new dataset""" @wraps(func) def wrapper(*args, **kwargs): if args: self: "Dataset" = args[0] args = args[1:] else: self: "Dataset" = kwargs.pop("self") # don't use self.format since it returns a list of columns for 'columns' even if self_format_columns is None unformatted_columns = set(self.column_names) - set(self._format_columns or []) self_format = { "type": self._format_type, "format_kwargs": self._format_kwargs, "columns": self._format_columns, "output_all_columns": self._output_all_columns, } # apply actual function out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) datasets: List["Dataset"] = list(out.values()) if isinstance(out, dict) else [out] # re-apply format to the output for dataset in datasets: new_format = self_format.copy() if new_format["columns"] is not None: # new formatted columns = (columns - previously unformatted columns) # sort the columns to have a deterministic list of columns that we can compare with `out_format` new_format["columns"] = sorted(set(dataset.column_names) - unformatted_columns) out_format = { "type": dataset._format_type, "format_kwargs": dataset._format_kwargs, "columns": sorted(dataset._format_columns) if dataset._format_columns is not None else None, "output_all_columns": dataset._output_all_columns, } if out_format != new_format: # only apply if there's a change not to update the fingerprint for nothing dataset.set_format(**new_format) return out wrapper._decorator_name_ = "transmit_format" return wrapper
1,109
def tg_exec_command(update: Update, context: CallbackContext) -> None:
    """Run command in shell"""
    if update.message.from_user.username not in settings.get("admins", []):
        update.message.reply_text("ERROR: Access denied")
        return
    proc = subprocess.run(update.message.text.split(' ', 1)[-1], shell=True,
                          stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    update.message.reply_text("STDOUT: %s\n\nSTDERR: %s\n\nCODE: %s"
                              % (proc.stdout, proc.stderr, proc.returncode))
1,110
def parse_version(version: str) -> Version:
    """Parses version string to Version class."""
    parsed = version.split(".")
    try:
        return Version(int(parsed[0]), int(parsed[1]),
                       int(parsed[2] if len(parsed) > 2 else -1))
    except ValueError:
        return Version(0, 0, -1)
1,111
def rate_of_change(x, t_Δ=1):
    """
    :param x: a series
    :param t_Δ: the intervals between each observation (series or constant)
    :return: rate of change for x
    """
    diffs = np.diff(x) / t_Δ
    return diffs
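A minimal usage sketch for rate_of_change above, assuming NumPy is available as np (as the function itself requires); the sample values are invented for illustration.

# Usage sketch for rate_of_change; input values are made up.
import numpy as np

x = np.array([0.0, 1.0, 3.0, 6.0])
print(rate_of_change(x))          # [1. 2. 3.] with the default unit interval
print(rate_of_change(x, t_Δ=2))   # [0.5 1.  1.5] when observations are 2 units apart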
1,112
def draw_cutout(data, title, lower_bound=0, upper_bound=1, is_mobile=False): """ Draw a cutout data """ # Update graph data for stamps data = np.nan_to_num(data) data = sigmoid_normalizer(data, lower_bound, upper_bound) data = data[::-1] data = convolve(data, smooth=1, kernel='gauss') if is_mobile: mask = create_circular_mask(len(data), len(data[0]), center=None, radius=None) data[~mask] = np.nan if is_mobile: zsmooth = 'fast' else: zsmooth = False fig = go.Figure( data=go.Heatmap( z=data, showscale=False, hoverinfo='skip', colorscale='Greys_r', zsmooth=zsmooth ) ) # Greys_r axis_template = dict( autorange=True, showgrid=False, zeroline=False, linecolor='black', showticklabels=False, ticks='') fig.update_layout( title='', margin=dict(t=0, r=0, b=0, l=0), xaxis=axis_template, yaxis=axis_template, showlegend=True, paper_bgcolor='rgba(0,0,0,0)', plot_bgcolor='rgba(0,0,0,0)' ) if not is_mobile: fig.update_layout(width=150, height=150) style = {'display': 'inline-block', 'height': '10pc', 'width': '10pc'} else: style = {'display': 'inline-block', 'height': '5pc', 'width': '5pc'} graph = dcc.Graph( id='{}-stamps'.format(title), figure=fig, style=style, config={'displayModeBar': False} ) return graph
1,113
def _check_sample(sample_pair: dict):
    """
    Controls a sample.

    Parameters
    ----------
    sample_pair : dict
        Sample must contain image and mask: {'image': image, 'mask': mask}

    Returns
    -------
    sample : dict
        Sample must contain image and mask: {'image': image, 'mask': mask}
    """
    if isinstance(sample_pair, dict):
        if len(sample_pair) != 2:
            raise ValueError(
                "Sample must contain image and mask: "
                "{'image': image, 'mask': mask}"
            )
    else:
        raise TypeError("Sample must be a dict like: {'image': image, 'mask': mask}")
    return sample_pair
1,114
def reset_deterministic_algorithm():
    """Ensures that torch determinism settings are reset before the next test runs."""
    yield
    if _TORCH_GREATER_EQUAL_1_8:
        torch.use_deterministic_algorithms(False)
    elif _TORCH_GREATER_EQUAL_1_7:
        torch.set_deterministic(False)
    else:  # the minimum version Lightning supports is PyTorch 1.6
        torch._set_deterministic(False)
1,115
def _make_cls(cls, attrs):
    """Make the custom config class."""
    return type(f'Custom{cls.__name__}', (cls, ), attrs)
1,116
def get_date(delta):
    """Build a date object with given day offset"""
    date = datetime.datetime.now()
    if delta is not None:
        offset = datetime.timedelta(days=delta)
        date = date + offset
    date = date.strftime("%A %-m/%-d")
    return date
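A short usage sketch for get_date above; note that the %-m/%-d strftime codes are platform-specific (glibc/macOS style), so the exact output shown in the comments is only indicative.

# Usage sketch for get_date, assuming the standard-library datetime module is imported.
import datetime

print(get_date(None))  # e.g. "Friday 6/14" -- today, no offset applied
print(get_date(1))     # tomorrow
print(get_date(-7))    # one week ago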
1,117
def mover_alfil(tablero, x_inicial, y_inicial, x_final, y_final):
    """
    (list of list, int, int, int, int) -> list of list
    :param tablero: list of lists representing the board
    :param x_inicial: int representing the initial X position
    :param y_inicial: int representing the initial Y position
    :param x_final: int representing the final X position
    :param y_final: int representing the final Y position
    :return: list of lists representing the resulting board
    """
    tab = tablero.copy()
    if ((x_inicial - y_inicial == x_final - y_final) or
            (x_inicial + y_inicial == x_final + y_final)) and tab[x_inicial][y_final].lower() == 'a':
        if (x_inicial != x_final) and (y_inicial != y_final):
            for x in range(x_inicial + 1, x_final):
                if tab[x][y_final] != ' ':
                    raise ValueError('The path is not valid')
            for y in range(y_inicial + 1, y_final):
                if tab[x_final][y] != ' ':
                    raise ValueError('The path is not valid')
            tab[x_final][y_final] = 'a'
            tab[x_inicial][y_inicial] = ' '
    return tab
1,118
def write_decaytable_entry_calchep(grouped_decays, gambit_model_name,
                                   calchep_pdg_codes, gambit_pdg_codes,
                                   decaybit_dict, calchep_processes):
    """
    Writes a DecayBit DecayTable::Entry module function for a given set
    of particle decays. Here, grouped_decays is a list, where:

    1. The first element is the decaying particle.
    2. The remaining entries are pairs of decay products.

    e.g. grouped_decays = [h, [ [tau+, tau-], [b, bbar], [t, tbar] ]]
    """

    # Find the name of the particle as in DecayBit_rollcall.hpp
    decayparticle = pdg_to_particle(grouped_decays[0], decaybit_dict)
    chep_name = pdg_to_particle(grouped_decays[0], calchep_pdg_codes)

    # If the particle does not decay, according to the particle database,
    # then there is no need to write a capability.
    if decayparticle != None:
        pass
    else:
        return ""

    # TODO: proper support for BSM contributions to Z and W decays
    if decayparticle == "Z":
        return ""
    elif decayparticle == "W_plus":
        return ""
    elif decayparticle == "W_minus":
        return ""

    function_name = "CH_{0}_{1}_decays".format(gambit_model_name,
                                               decayparticle).replace('~', 'bar')
    spectrum = gambit_model_name + "_spectrum"

    # Definitely a nicer way to do this, but, this will do for now.
    # Should make it a bit easier to add 3 body final states.
    # (Overloaded as a backend function?)
    products = np.array(grouped_decays[1])

    c_name = []
    g_name = []
    for i in np.arange(len(products)):
        c_name.append(map(lambda x: pdg_to_particle(x, calchep_pdg_codes), products[i]))
        g_name.append(map(lambda x: pdg_to_particle(x, gambit_pdg_codes), products[i]))

    out1c = np.array([pdg_to_particle(x, calchep_pdg_codes) for x in products[:, 0]])
    out2c = np.array([pdg_to_particle(x, calchep_pdg_codes) for x in products[:, 1]])

    c_strings = []
    g_strings = []
    for i in np.arange(len(c_name)):
        c_strings.append("{{{}}}".format(', '.join("\"{0}\"".format(x) for x in c_name[i])))
        g_strings.append("{{{}}}".format(', '.join("\"{0}\"".format(y) for y in g_name[i])))

    calchep_processes['decays'][chep_name].append([list(i) for i in zip(out1c, out2c)])

    towrite = (
        "void {0}(DecayTable::Entry& result)\n"
        "{{\n"
        "using namespace Pipes::{0};\n"
        "// Clear previous decays\n"
        "result = DecayTable::Entry();\n"
        "\n"
        "const Spectrum& spec = *Dep::{1};\n"
        "\n"
    ).format(function_name, spectrum)

    if decayparticle == "Higgs":
        towrite += "result = *Dep::Reference_SM_Higgs_decay_rates;\n\n"

    towrite += (
        "str model = \"{0}\";\n"
        "str in = \"{1}\";"
        " // In state: CalcHEP particle name\n"
        "std::vector<std::vector<str>> out_calchep = {{{2}}}; "
        "// Out states: CalcHEP particle names\n"
        "std::vector<std::vector<str>> out_gambit = {{{3}}}; "
        "// Out states: GAMBIT particle names\n\n"
        "for (unsigned int i=0; i<out_calchep.size(); i++)\n"
        "{{\n"
        "\n"
        "double gamma = BEreq::CH_Decay_Width(model, in, "
        "out_calchep[i]); // Partial width\n"
        "double newwidth = result.width_in_GeV + gamma; "
        "// Adjust total width\n"
        "double wscaling = ( gamma == 0. ) ? 1 : result.width_in_GeV"
        "/newwidth; // Scaling for BFs, avoid NaNs\n"
        "result.width_in_GeV = newwidth;\n"
        "\n"
        "for (auto it = result.channels.begin(); "
        "it != result.channels.end(); ++it)\n"
        "{{\n"
        "it->second.first *= wscaling; "
        "// rescale BF \n"
        "it->second.second *= wscaling; // rescale error on BF \n"
        "}}\n"
        "\n"
        "// Avoid NaNs!\n"
        "double BF = ( gamma == 0. ) ? 0. : gamma/result.width_in_GeV;\n"
        "\n"
        "result.set_BF(BF, 0.0, "
        "out_gambit[i][0], out_gambit[i][1]);\n"
        "\n"
        "}}\n"
        "\n"
        "check_width(LOCAL_INFO, result.width_in_GeV, "
        "runOptions->getValueOrDef<bool>(false, "
        "\"invalid_point_for_negative_width\"))"
        ";\n"
        "}}"
        "\n"
        "\n"
    ).format(gambit_model_name, chep_name,
             ", ".join(c_strings), ", ".join(g_strings))

    return indent(towrite, 4)
1,119
def test_roc_curve_display_default_labels(
    pyplot, roc_auc, estimator_name, expected_label
):
    """Check the default labels used in the display."""
    fpr = np.array([0, 0.5, 1])
    tpr = np.array([0, 0.5, 1])
    disp = RocCurveDisplay(
        fpr=fpr, tpr=tpr, roc_auc=roc_auc, estimator_name=estimator_name
    ).plot()
    assert disp.line_.get_label() == expected_label
1,120
def collect_process_name_garbage(): """ Delete all the process names that point to files that don't exist anymore (because the work directory was temporary and got cleaned up). This is known to happen during the tests, which get their own temp directories. Caller must hold current_process_name_lock. """ global current_process_name_for # Collect the workDirs of the missing names to delete them after iterating. missing = [] for workDir, name in current_process_name_for.items(): if not os.path.exists(os.path.join(workDir, name)): # The name file is gone, probably because the work dir is gone. missing.append(workDir) for workDir in missing: del current_process_name_for[workDir]
1,121
def log(ltype, msg, logit=None):
    """
    ########################################################################################
    # print msg and log it if needed
    # log([0 - INFO, 1 - WARNING, 2 - ERROR], 'log message', 'any value if you want
    # to log in the jailog file')
    #
    """
    logtype = ['INFO', 'WARNING', 'ERROR']
    print(" %s: %s" % (logtype[ltype], msg))
    if logit != None:
        logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
                            level=logging.DEBUG, filename=_logfile)
        if ltype == 0:
            logging.info(msg)
        if ltype == 1:
            logging.warning(msg)
        if ltype == 2:
            logging.error(msg)
1,122
def interpolate_atmosphere(data, Z, s=0.25):
    """ This module generates a 1d array for the model plasma pressure, plasma
    density, temperature and mean molecular weight.
    """
    from scipy.interpolate import UnivariateSpline
    hdata = np.array(u.Quantity(data['Z']).to(u.m))
    # interpolate total pressure, temperature and density profiles
    pdata_f = UnivariateSpline(hdata, np.array(np.log(data['p'])), k=1, s=s)
    Tdata_f = UnivariateSpline(hdata, np.array(np.log(data['T'])), k=1, s=s)
    rdata_f = UnivariateSpline(hdata, np.array(np.log(data['rho'])), k=1, s=s)
    # s=0.0 to ensure all points are strictly used for ionisation state
    muofT_f = UnivariateSpline(hdata, np.array(np.log(data['mu'])), k=1, s=0.0)

    outdata = Table()
    outdata['Z'] = Z
    outdata['p'] = np.exp(pdata_f(Z.to(u.m))) * data['p'].unit
    outdata['T'] = np.exp(Tdata_f(Z.to(u.m))) * data['T'].unit
    outdata['rho'] = np.exp(rdata_f(Z.to(u.m))) * data['rho'].unit
    outdata['mu'] = np.exp(muofT_f(Z.to(u.m))) * u.one

    return outdata
1,123
def merge_vcf_files(infiles, ref_seqs, outfile, threads=1):
    """infiles: list of input VCF files to be merged.
    outfile: name of output VCF file.
    threads: number of input files to read in parallel"""
    vars_dict = vcf_file_read.vcf_files_to_dict_of_vars(
        infiles, ref_seqs, threads=threads
    )
    _dict_of_vars_to_vcf_file(vars_dict, outfile)
1,124
def exponential_coulomb_uniform_correlation_density(
        density,
        amplitude=constants.EXPONENTIAL_COULOMB_AMPLITUDE,
        kappa=constants.EXPONENTIAL_COULOMB_KAPPA):
    """Correlation energy density for uniform gas with exponential coulomb.

    Equation 24 in the following paper provides the correlation energy per
    length for 1d uniform gas with exponential coulomb interaction.

    One-dimensional mimicking of electronic structure: The case for exponentials.
    Physical Review B 91.23 (2015): 235141.
    https://arxiv.org/pdf/1504.05620.pdf

    y = pi * density / kappa
    correlation energy per length
        = -amplitude * kappa * y ** 2 / (pi ** 2) / (
            alpha + beta * sqrt(y) + gamma * y + delta * sqrt(y ** 3)
            + eta * y ** 2 + sigma * sqrt(y ** 5)
            + nu * pi * kappa ** 2 / amplitude * y ** 3)

    correlation energy density
        = correlation energy per length * pi / (kappa * y)
        = -amplitude * y / pi / (
            alpha + beta * sqrt(y) + gamma * y + delta * sqrt(y ** 3)
            + eta * y ** 2 + sigma * sqrt(y ** 5)
            + nu * pi * kappa ** 2 / amplitude * y ** 3)

    Note the correlation energy density converges to zero at the high density limit.

    Args:
      density: Float numpy array with shape (num_grids,).
      amplitude: Float, parameter of exponential Coulomb interaction.
      kappa: Float, parameter of exponential Coulomb interaction.

    Returns:
      Float numpy array with shape (num_grids,).
    """
    y = jnp.pi * density / kappa
    alpha = 2.
    beta = -1.00077
    gamma = 6.26099
    delta = -11.9041
    eta = 9.62614
    sigma = -1.48334
    nu = 1.
    # The derivative of sqrt is not defined at y=0, we use two jnp.where to avoid
    # nan at 0.
    finite_y = jnp.where(y == 0., 1., y)
    out = -amplitude * finite_y / jnp.pi / (
        alpha + beta * jnp.sqrt(finite_y) + gamma * finite_y
        + delta * finite_y ** 1.5 + eta * finite_y ** 2
        + sigma * finite_y ** 2.5
        + nu * jnp.pi * kappa ** 2 / amplitude * finite_y ** 3
    )
    return jnp.where(y == 0., -amplitude * y / jnp.pi / alpha, out)
1,125
def val(model, dataloader, use_gpu): """val. the CNN model. Args: model (nn.model): CNN model. dataloader (dataloader): val. dataset. Returns: tuple(int, in): average of image acc. and digit acc.. """ model.eval() # turn model to eval. mode(enable droupout layers...) result_digit = [] result_img = [] for i, (data, label) in enumerate(dataloader): with torch.no_grad(): # disable autograd if use_gpu: input = data.cuda() score = model(input) pred = decode(score) tmp = pred == label.numpy() result_digit += tmp.tolist() result_img += np.all(tmp, axis=1).tolist() i = np.random.randint(0, len(dataloader) - 1) im_show = np.transpose(input[i].detach().cpu().numpy(), (1, 2, 0)) im_show = np.repeat((im_show * 255).astype(np.uint8), 3, -1) # turn model back to training mode. model.train() return np.mean(result_img), np.mean(result_digit), [im_show, pred[i]]
1,126
def ps(s):
    """Process String: convert a string into a list of lowercased words."""
    return s.lower().split()
1,127
def edit_txt(ctx, record_id, name, value, ttl=3600):
    """Edit default TXT record"""
    try:
        data = {
            "name": name,
            "value": value,
            "ttl": ttl
        }
        r = ctx.obj['client'].default_record(record_id)
        record = r.edit(data)
        click.echo(json.dumps(record.values, indent=4))
    except ClientException as e:
        click.echo("Error: " + str(e.code))
        click.echo("Response: " + str(e.message))
        ctx.exit(1)
1,128
def offset_plot_r(offset, az_min, az_max, r_plot, az_plot, logpath=None,
                  outdir=None, shellscript=None):
    """
    | IPTA script: /usr/local/GAMMA_SOFTWARE-20180703/ISP/scripts/offset_plot_r
    | Copyright 2004, Gamma Remote Sensing, v1.3 17-Jan-2005 clw
    | extract range and azimuth offsets for an azimuth window from a text offset file

    Parameters
    ----------
    offset:
        (input) list of range and azimuth offsets generated by offset_pwr (text)
    az_min:
        minimum azimuth line number to extract range and azimuth offsets
    az_max:
        maximum azimuth line number to extract range and azimuth offsets
    r_plot:
        range offsets xmgrace plot file
    az_plot:
        azimuth offsets xmgrace plot file
    logpath: str or None
        a directory to write command logfiles to
    outdir: str or None
        the directory to execute the command in
    shellscript: str or None
        a file to write the Gamma commands to in shell format
    """
    process(['/usr/local/GAMMA_SOFTWARE-20180703/ISP/scripts/offset_plot_r',
             offset, az_min, az_max, r_plot, az_plot],
            logpath=logpath, outdir=outdir, shellscript=shellscript)
1,129
def _noop_check_fix() -> None: """A lambda is not pickleable, this must be a module-level function"""
1,130
def show_table_matrix(cells, colors=None, cells2=None, colors2=None, link_matrix=None, table_font_size=32, rot_45=True, return_img=False): """Draw a view of two tables together with a row-row association matrix. """ from matplotlib import tight_layout fig, axs = plt.subplots(2, 2, figsize=(12,12)) plt.subplots_adjust(wspace=0, hspace=0) axs[1,0].axis('off') # lower-left off ax=axs[0,0] # top-left make_datatable(cells, colors, columnlabels=cells.columns, table_font_size=table_font_size, ax=ax) ax=axs[1,1] # lower right, table view rotated by 90 deg CW make_datatable(cells2, colors2, columnlabels=cells2.columns, rot_90=True, table_font_size=table_font_size, ax=ax) ax=axs[0,1] # top right make_grid(ax, ncols=cells2.shape[0], # table below grid (bottom right in rot_45 view) nrows=cells.shape[0], # table left of grid (bottom left in rot_45 view) nshrink_cr=(1, 1) # shrink axis bbox to remove one extra row and column ) try: assert link_matrix.shape == (cells.shape[0], cells2.shape[0]) links = matrix_to_df(link_matrix) for row, col in links[links.values].index: rowcol = np.array(link_matrix.shape) - (row, col) - 1 doty, dotx = 2*rowcol+1 ax.plot( [dotx, dotx, 0], [0, doty, doty], linewidth=2, color='black' ) except AttributeError: pass renderer = tight_layout.get_renderer(fig) if return_img or rot_45: img = plt.imread(savefig_to_buffer(fig, format='png', bbox_inches='tight'), format='png') if rot_45: plt.close() from scipy.ndimage import rotate plt.figure(figsize=(12,12)) plt.imshow(np.clip(rotate(img, angle=45, cval=1 # white background instead of grey outline ), 0, 1)) plt.axis('off') if return_img: return img
1,131
def login(username, password):
    """
    Log in with the account (email address) and password, with "remember me" selected.
    :param username:
    :param password:
    :return:
    """
    global a
    a.get("https://account.fangcloud.com/login")
    _token = a.b.find("input", {"name": "_token"})["value"]
    _fstate = a.b.find("input", {"name": "_fstate"})["value"]
    x = a.post("https://account.fangcloud.com/login?_fstate=" + _fstate,
               """{"login":"%s","password":"%s","remember_login":true,"login_type":"web","_fstate":"%s"}"""
               % (username, password, _fstate),
               headers={"X-CSRF-TOKEN": _token,
                        "X-Requested-With": "XMLHttpRequest",
                        "Content-Type": "application/json"})
    result = x.json()
    if "redirect" not in result:
        raise Exception("login failed! maybe password incorrect or need captcha")
    url = result["redirect"]
    x = a.get(url, result=False, o=True, allow_redirects=True)
    assert 'apps/files' in x.url
    return True
1,132
def rules(): """Displays a markdown doc describing the predictive modeling contest. Note ./content/contest/<url calling path>.md must be modified for contest. """ file = open('./contest/content/rules.md', 'r') rawText = file.read() file.close() content = Markup(markdown(rawText, extensions=['markdown.extensions.fenced_code', 'markdown.extensions.tables'])) return render_template('markdowntemplate.html', title='Rules', content=content)
1,133
def greedy_search(decoder, encoder_outputs, encoder_outputs_mask, debug=False):
    """Performs greedy search. Returns hypotheses with scores."""
    batch_size = encoder_outputs.size(0)
    encoder_hidden_dim = encoder_outputs.size(2)
    assert encoder_hidden_dim == decoder._decoder_hidden_dim
    trg_h_t, trg_c_t = decoder._initalize_hidden_context_states(
        encoder_outputs, encoder_outputs_mask)
    max_trg_length = decoder._max_decoding_steps

    # Expand tensors for each beam.
    dec_states = (trg_h_t, trg_c_t)

    gen_indices = encoder_outputs.new_zeros(batch_size, max_trg_length + 1).fill_(
        decoder.vocab.get_token_index(START_SYMBOL, "targets"))

    for i in range(1, max_trg_length):
        decoder_input = decoder._prepare_decode_step_input(
            input_indices=gen_indices[:, i - 1],
            decoder_hidden_state=dec_states[0],
            encoder_outputs=encoder_outputs,
            encoder_outputs_mask=encoder_outputs_mask,
        )
        logits, dec_states = decoder._decoder_step(
            decoder_input, dec_states[0], dec_states[1],
        )
        transition_probs = F.softmax(logits, dim=1)

        # be careful if you want to change this - the orientation doesn't
        # work if you switch dims in view() and remove transpose()
        word_lk = transition_probs.view(
            batch_size, -1
        )
        scores, gen_indices[:, i] = word_lk.max(1)
        # TODO calculate scores

    def _print_sentence(indices):
        sent = [_get_word(decoder.vocab, word_idx.item()) for word_idx in indices[1:]]
        print(' '.join(sent))

    if debug:
        for i in range(gen_indices.size(0)):
            _print_sentence(gen_indices[i, :])

    return gen_indices.cpu().numpy(), scores
1,134
def get(server: t.Union[Server, str], view_or_url: str, view_data: Kwargs = None,
        session: requests.Session = None, params: Kwargs = None, **kwargs) -> Response:
    """Sends a GET request."""
    return request('get', server, view_or_url, view_data=view_data,
                   session=session, params=params, **kwargs)
1,135
def parse_command_line(): """ Parses the command line options and prints the errors, if any occur. """ if "-h" in sys.argv or "--help" in sys.argv: terminate(HELP_TEXT, 0) if "-f" in sys.argv or "--file" in sys.argv: try: file_index = sys.argv.index("-f") + 1 except ValueError: file_index = sys.argv.index("--file") + 1 try: properties_file = sys.argv[file_index] if not os.path.isfile(properties_file): terminate(ERROR_MESSAGE.format("-f/--file"), 1) except IndexError: terminate(ERROR_MESSAGE.format("-f/--file"), 1) else: terminate("-f/--file option not specified", 1) if "-j" in sys.argv or "--json-report" in sys.argv: try: json_report_index = sys.argv.index("-j") + 1 except ValueError: json_report_index = sys.argv.index("--json-report") + 1 try: json_report_file = sys.argv[json_report_index] except IndexError: terminate(ERROR_MESSAGE.format("-j/--json-report"), 1) else: json_report_file = None generate_html_report = True if "-g" in sys.argv or "--generate-html-report" in sys.argv else False if generate_html_report and ("-r" in sys.argv or "--html-report" in sys.argv): try: html_report_index = sys.argv.index("-r") + 1 except ValueError: html_report_index = sys.argv.index("--html-report") + 1 try: html_report_file = sys.argv[html_report_index] except IndexError: terminate(ERROR_MESSAGE.format("-r/--html-report"), 1) else: html_report_file = None return properties_file, json_report_file, generate_html_report, html_report_file
1,136
def upload_files( project: str, paths: Sequence[Union[Path, str]], target_dir: str, strip_prefix: str = "", progress_bar: bool = True, ) -> None: """Upload all provided files from the local filesystem into `target_dir` on GCS. `strip_prefix` is removed from each local filepath and the remainder is appended to `target_dir` to create the target path. Note: The bucket should be included in the target path! """ # Remove any gs:// prefix and split the bucket name off the target dir target_dir = Path(remove_prefix(target_dir, "gs://")) bucket_name = target_dir.parts[0] target_dir = str(target_dir.relative_to(bucket_name)) bucket = gcs.Client(project=project).get_bucket(str(bucket_name)) # Note: This will overwrite any blobs that already exist. def upload_file(file: Path) -> TransferEvent: blob = bucket.blob( os.path.join(target_dir, remove_prefix(str(file), strip_prefix).strip("/")) ) blob.upload_from_filename(str(file), checksum="md5") return TransferEvent(file.stat().st_size, str(file), blob.name) # Create a ThreadPool to upload multiple files in parallel with ThreadPoolExecutor() as e: futures = [e.submit(upload_file, path) for path in paths] if progress_bar: network_futures_progress_bar(futures, mode="upload", keep_order=False) else: wait(futures)
1,137
def to_float(dataframe, column):
    """General Function to return floats"""
    dataframe[column] = dataframe[column].dropna().astype(float)
    dataframe[column] = dataframe[column].where(pandas.notnull(dataframe[column]), None)
    return dataframe[column]
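A brief usage sketch for to_float above, assuming pandas is imported as `pandas` (as the function requires); the column name is invented, and how missing values are represented in the result depends on the pandas version.

# Usage sketch for to_float; the "price" column is hypothetical.
import pandas

df = pandas.DataFrame({"price": ["1.5", None, "3"]})
df["price"] = to_float(df, "price")
print(df["price"].tolist())  # valid entries become floats, gaps stay None/NaN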
1,138
def compute_kv_template(config): """Draw from a line template""" # copy to a new memory, avoid lost info _i = config['info'].copy() _d = config['data'].copy() # fill in the data if len(_d) < 1: raise ValueError('A template data is essential!') config['data'] = [] for log_name in os.listdir(_i['dir']): if log_name.split('.')[-1] != 'log': continue config['data'].append(_d[0].copy()) config['data'][-1]['path'] = os.path.join(_i['dir'], log_name) # output to jsonfile output_filename = os.path.basename(_i['dir'])+'.'+_i['task'] + '.json' output = os.path.join(_i['dir'], output_filename) # change type config['task'] = _i['task'] config.pop('info') utils.save_json(config, output) print('Config file has been saved in %s' % output) if _i['run']: return compute_kv(config)
1,139
def load_qm7(featurizer=None, split='random'): """Load qm7 datasets.""" # Featurize qm7 dataset print("About to featurize qm7 dataset.") current_dir = os.path.dirname(os.path.realpath(__file__)) dataset_file = os.path.join(current_dir, "./gdb7.sdf") qm7_tasks = ["u0_atom"] if featurizer is None: featurizer = dc.feat.CoulombMatrixEig(23) loader = dc.data.SDFLoader( tasks=qm7_tasks, smiles_field="smiles", mol_field="mol", featurizer=featurizer) dataset = loader.featurize(dataset_file) split_file = os.path.join(current_dir, "./qm7_splits.csv") split_indices = [] with open(split_file, 'r') as f: reader = csv.reader(f) for row in reader: row_int = (np.asarray(list(map(int, row)))).tolist() split_indices.append(row_int) splitters = { 'index': dc.splits.IndexSplitter(), 'random': dc.splits.RandomSplitter(), 'indice': dc.splits.IndiceSplitter(valid_indices=split_indices[1]), 'stratified': dc.splits.SingletaskStratifiedSplitter(task_number=0) } splitter = splitters[split] train_dataset, valid_dataset, test_dataset = splitter.train_valid_test_split( dataset) transformers = [ dc.trans.NormalizationTransformer( transform_y=True, dataset=train_dataset) ] for transformer in transformers: train_dataset = transformer.transform(train_dataset) valid_dataset = transformer.transform(valid_dataset) test_dataset = transformer.transform(test_dataset) return qm7_tasks, (train_dataset, valid_dataset, test_dataset), transformers
1,140
def dadbt(a: torch.Tensor, diag_mat: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    """Batched computation of diagonal entries of (A * diag_mat * B^T) where A and B
    are batches of square matrices and diag_mat is a batch of diagonal matrices
    (represented as vectors containing diagonal entries).

    :param a: batch of square matrices
    :param diag_mat: batch of diagonal matrices (represented as vectors
        containing diagonal entries)
    :param b: batch of square matrices
    :returns: diagonal entries of A * diag_mat * B^T"""
    return bmv(a * b, diag_mat)
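A quick numerical check of the identity the function relies on, diag(A · diag(d) · Bᵀ) = (A ∘ B) · d. Since `bmv` is assumed here to be a batched matrix-vector product not shown in this snippet, the sketch uses torch.einsum directly.

# Verification sketch: diag(A @ diag(d) @ B.T) equals (A * B) @ d, batched.
import torch

a = torch.randn(4, 3, 3)   # batch of square matrices
b = torch.randn(4, 3, 3)
d = torch.randn(4, 3)      # batch of diagonals stored as vectors

explicit = torch.diagonal(a @ torch.diag_embed(d) @ b.transpose(-1, -2),
                          dim1=-2, dim2=-1)
via_hadamard = torch.einsum('bij,bj->bi', a * b, d)  # what dadbt computes via bmv
assert torch.allclose(explicit, via_hadamard, atol=1e-6)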
1,141
def test_get_input(monkeypatch):
    """Check if provided input is returned unmodified."""
    from tokendito import helpers

    monkeypatch.setattr("tokendito.helpers.input", lambda _: "pytest_patched")
    assert helpers.get_input() == "pytest_patched"
1,142
def local_minima(image, footprint=None, connectivity=None, indices=False,
                 allow_borders=True):
    """Find local minima of n-dimensional array.

    The local minima are defined as connected sets of pixels with equal gray
    level (plateaus) strictly smaller than the gray levels of all pixels in
    the neighborhood.

    Parameters
    ----------
    image : ndarray
        An n-dimensional array.
    footprint : ndarray, optional
        The footprint (structuring element) used to determine the neighborhood
        of each evaluated pixel (``True`` denotes a connected pixel). It must
        be a boolean array and have the same number of dimensions as `image`.
        If neither `footprint` nor `connectivity` are given, all adjacent
        pixels are considered as part of the neighborhood.
    connectivity : int, optional
        A number used to determine the neighborhood of each evaluated pixel.
        Adjacent pixels whose squared distance from the center is less than or
        equal to `connectivity` are considered neighbors. Ignored if
        `footprint` is not None.
    indices : bool, optional
        If True, the output will be a tuple of one-dimensional arrays
        representing the indices of local minima in each dimension. If False,
        the output will be a boolean array with the same shape as `image`.
    allow_borders : bool, optional
        If true, plateaus that touch the image border are valid minima.

    Returns
    -------
    minima : ndarray or tuple[ndarray]
        If `indices` is false, a boolean array with the same shape as `image`
        is returned with ``True`` indicating the position of local minima
        (``False`` otherwise). If `indices` is true, a tuple of one-dimensional
        arrays containing the coordinates (indices) of all found minima.

    See Also
    --------
    skimage.morphology.local_maxima
    skimage.morphology.h_maxima
    skimage.morphology.h_minima

    Notes
    -----
    This function operates on the following ideas:

    1. Make a first pass over the image's last dimension and flag candidates
       for local minima by comparing pixels in only one direction. If the
       pixels aren't connected in the last dimension all pixels are flagged
       as candidates instead.

    For each candidate:

    2. Perform a flood-fill to find all connected pixels that have the same
       gray value and are part of the plateau.
    3. Consider the connected neighborhood of a plateau: if no bordering sample
       has a smaller gray level, mark the plateau as a definite local minimum.

    Examples
    --------
    >>> from skimage.morphology import local_minima
    >>> image = np.zeros((4, 7), dtype=int)
    >>> image[1:3, 1:3] = -1
    >>> image[3, 0] = -1
    >>> image[1:3, 4:6] = -2
    >>> image[3, 6] = -3
    >>> image
    array([[ 0,  0,  0,  0,  0,  0,  0],
           [ 0, -1, -1,  0, -2, -2,  0],
           [ 0, -1, -1,  0, -2, -2,  0],
           [-1,  0,  0,  0,  0,  0, -3]])

    Find local minima by comparing to all neighboring pixels (maximal
    connectivity):

    >>> local_minima(image)
    array([[False, False, False, False, False, False, False],
           [False,  True,  True, False, False, False, False],
           [False,  True,  True, False, False, False, False],
           [ True, False, False, False, False, False,  True]])
    >>> local_minima(image, indices=True)
    (array([1, 1, 2, 2, 3, 3]), array([1, 2, 1, 2, 0, 6]))

    Find local minima without comparing to diagonal pixels (connectivity 1):

    >>> local_minima(image, connectivity=1)
    array([[False, False, False, False, False, False, False],
           [False,  True,  True, False,  True,  True, False],
           [False,  True,  True, False,  True,  True, False],
           [ True, False, False, False, False, False,  True]])

    and exclude minima that border the image edge:

    >>> local_minima(image, connectivity=1, allow_borders=False)
    array([[False, False, False, False, False, False, False],
           [False,  True,  True, False,  True,  True, False],
           [False,  True,  True, False,  True,  True, False],
           [False, False, False, False, False, False, False]])
    """
    return local_maxima(
        image=invert(image),
        footprint=footprint,
        connectivity=connectivity,
        indices=indices,
        allow_borders=allow_borders
    )
1,143
def kron(a, b):
    """
    Kronecker product of matrices a and b with leading batch dimensions.
    Batch dimensions are broadcast, so they must be broadcastable against
    each other.
    :type a: torch.Tensor
    :type b: torch.Tensor
    :rtype: torch.Tensor
    """
    siz1 = torch.Size(tensor(a.shape[-2:]) * tensor(b.shape[-2:]))
    res = a.unsqueeze(-1).unsqueeze(-3) * b.unsqueeze(-2).unsqueeze(-4)
    siz0 = res.shape[:-4]
    return res.reshape(siz0 + siz1)
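A sanity-check sketch for the batched kron above; it assumes the `tensor` name used inside the helper refers to torch.tensor and that torch.kron (available since PyTorch 1.8) can serve as the non-batched reference.

# Sanity check: for 2-D inputs the batched kron should agree with torch.kron.
import torch
from torch import tensor

a = torch.randn(2, 3)
b = torch.randn(4, 5)
assert torch.allclose(kron(a, b), torch.kron(a, b), atol=1e-6)

# With leading batch dimensions the result has shape (batch, m*p, n*q).
a_batched = torch.randn(7, 2, 3)
b_batched = torch.randn(7, 4, 5)
print(kron(a_batched, b_batched).shape)  # torch.Size([7, 8, 15])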
1,144
def process_query(request):
    """The function is called upon the "news/" URL. It processes the query and calls
    the API function to fetch news articles from third-party news APIs. If a query is
    new, it makes a fresh request to the third-party APIs, returns the query results
    and adds the query and its results to the database. Otherwise, if the query is
    repeated, it fetches the results from the database, provided it has not passed
    the expiry time (set to 120s). If it has passed the expiry time, a new request is
    sent to the third-party news APIs and the results are updated in the database.

    Args:
        request (GET)

    Returns:
        json: returns the list of query results in the form of a json object.
    """
    if request.method == 'POST':
        return JsonResponse({'Response': 'Invalid Request type, please use "GET"'}, status=400)
    try:
        keyword = request.GET.get('query')
        request_time = datetime.datetime.now(pytz.UTC)
        obj, created = Query.objects.get_or_create(keyword=keyword)
        if created == True:
            add_to_db(obj, keyword)
        elif (request_time - obj.query_time).seconds > EXPIRY_TIME:
            obj.query_result.all().delete()
            Query.objects.filter(keyword=keyword).update(query_time=request_time)
            add_to_db(obj, keyword)
        response = []
        for item in obj.query_result.all():
            response.append(item.to_dict())
        return JsonResponse(response, safe=False, status=200)
    except Exception as e:
        print(e)
        return JsonResponse({'Response': 'Something went wrong'}, status=400)
1,145
def fix_header(params, recipe, infile=None, header=None, raise_exception=False, **kwargs): """ Instrument specific header fixes are define in pseudo_const.py for an instrument and called here (function in pseudo_const.py is HEADER_FIXES) :param params: :param infile: :return: """ # deal with no header if header is None: header = infile.header hdict = infile.hdict filename = infile.filename has_infile = True else: has_infile = False hdict = Header() filename = None # load pseudo constants pconst = constants.pload(params['INSTRUMENT']) # use pseudo constant to apply any header fixes required (specific to # a specific instrument) and update the header try: header, hdict = pconst.HEADER_FIXES(params=params, recipe=recipe, header=header, hdict=hdict, filename=filename, **kwargs) except lang.drs_exceptions.DrsHeaderError as e: if raise_exception: raise e else: eargs = [e.key, e.filename] WLOG(params, 'error', TextEntry('01-001-00027', args=eargs)) # if the input was an infile return the infile back if has_infile: # return the updated infile infile.header = header infile.hdict = hdict return infile # else return the header (assuming input was a header only) else: # else return the header return header, hdict
1,146
def encode(df: pd.DataFrame, cols: List[str], drop_first: bool = True) -> pd.DataFrame:
    """Do a dummy encoding for the columns specified.

    Args:
        df: DataFrame
        cols: List of columns to perform dummy encoding on
        drop_first: parameter for dummy encoding
    """
    dfs = []
    for col in df.columns:
        ds = df[col]
        if col not in cols:
            dfs.append(ds.to_frame())
        else:
            dfs.append(pd.get_dummies(ds, prefix=col, drop_first=drop_first))
    return pd.concat(dfs, axis=1)
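A small usage sketch for the dummy-encoding helper above; the column names are invented for illustration.

# Usage sketch for encode(); "city" and "price" are hypothetical columns.
import pandas as pd

df = pd.DataFrame({"city": ["a", "b", "a"], "price": [1.0, 2.0, 3.0]})
encoded = encode(df, cols=["city"])
print(encoded.columns.tolist())  # ['city_b', 'price'] with drop_first=True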
1,147
def do_absolute_limits(cs, args):
    """Lists absolute limits for a user."""
    limits = cs.limits.get().absolute
    columns = ['Name', 'Value']
    utils.print_list(limits, columns)
1,148
def binary_search(pool: list, target) -> Optional[int]:
    """Search for a target in a list, using binary search.

    Args:
        pool (list): a pool of all elements being searched.
        target: the target being searched.

    Returns:
        int: the index of the target in the sorted pool, or None if absent.
    """
    sorted_pool = sorted(pool)
    low = 0
    high = len(sorted_pool) - 1
    while low <= high:
        mid = (low + high) // 2
        if sorted_pool[mid] == target:
            return mid
        if sorted_pool[mid] < target:
            low = mid + 1
        else:
            high = mid - 1
    return None
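A few quick checks for the binary search above; note the returned index refers to the sorted copy of the input, not the original ordering.

# Quick checks for binary_search; indices refer to the sorted pool [1, 3, 5, 7, 9].
pool = [9, 1, 7, 3, 5]
assert binary_search(pool, 1) == 0      # smallest element
assert binary_search(pool, 9) == 4      # largest element
assert binary_search(pool, 5) == 2
assert binary_search(pool, 4) is None   # absent value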
1,149
def load_array(filename): """ Given a valid image, load the image and return the pixels as a numpy array :param filename: The filename as a string :returns: A numpy array which stores the pixel data from a snowmap Convention is as follows: pixels that read 0,0,0, 255 are read as snow-free and contain the value 0; pixels that read 0,0,0,0 assume no data and return -1, and pixels that read (255, 255, 255, 255) are read as snow and get the value 1 """ image = Image.open(filename) image.load() height, width = image.size snowmap = np.zeros((height, width), dtype=int) for row in range(height): for col in range(width): a = image.getpixel((row,col)) if a == (0, 0, 0, 255): # This is no snow snowmap[row, col] = 0 elif a == (0, 0, 0, 0): # this is no data snowmap[row, col] = -1 elif a == (255, 255, 255, 255): # that's for snow snowmap[row, col] = 1 else: raise ValueError("Unknown Pixel value {}".format(a)) return snowmap
1,150
def GenerateParseTreeDecls(concrete_classes, abstract_classes): """Generates parse_tree_decls.h contents containing forward declarations. Args: concrete_classes: a list of classes for which to generate declarations abstract_classes: a list of classes for which to generate declarations Yields: A string part of the output code. """ yield textwrap.dedent('''\ #ifndef STORAGE_ZETASQL_PARSER_PARSE_TREE_DECLS_H #define STORAGE_ZETASQL_PARSER_PARSE_TREE_DECLS_H namespace zetasql { ''') for cls in abstract_classes + concrete_classes: yield 'class {0};\n'.format(cls) yield textwrap.dedent('''\ } // namespace zetasql #endif // STORAGE_ZETASQL_PARSER_PARSE_TREE_DECLS_H ''')
1,151
def test_module_exports(all_names: list[str], cls: Type[object]) -> None:
    """Test that ass_tag_parser.__init__.__all__ includes all defined ASS tags
    and draw commands.
    """
    assert cls.__name__ in all_names
1,152
def create_anchors_3d_stride(feature_size, anchor_strides, sizes=[1.6, 3.9, 1.56], anchor_offsets=[0, -20, -1], # [0.2, -39.8, -1.78], rotations=[0, 1.57], # np.pi / 2 dtype=np.float32): """ Args: feature_size: list [D, H, W](zyx) sizes: [N, 3] list of list or array, size of anchors, xyz Returns: anchors: [*feature_size, num_sizes, num_rots, 7] tensor. """ # almost 2x faster than v1 x_stride, y_stride, z_stride = anchor_strides x_offset, y_offset, z_offset = anchor_offsets z_centers = np.arange(feature_size[0], dtype=dtype) y_centers = np.arange(feature_size[1], dtype=dtype) x_centers = np.arange(feature_size[2], dtype=dtype) z_centers = z_centers * z_stride + z_offset y_centers = y_centers * y_stride + y_offset x_centers = x_centers * x_stride + x_offset sizes = np.reshape(np.array(sizes, dtype=dtype), [-1, 3]) rotations = np.array(rotations, dtype=dtype) rets = np.meshgrid( x_centers, y_centers, z_centers, rotations, indexing='ij') tile_shape = [1] * 5 tile_shape[-2] = int(sizes.shape[0]) for i in range(len(rets)): rets[i] = np.tile(rets[i][..., np.newaxis, :], tile_shape) rets[i] = rets[i][..., np.newaxis] # for concat sizes = np.reshape(sizes, [1, 1, 1, -1, 1, 3]) tile_size_shape = list(rets[0].shape) tile_size_shape[3] = 1 sizes = np.tile(sizes, tile_size_shape) rets.insert(3, sizes) ret = np.concatenate(rets, axis=-1) return np.transpose(ret, [2, 1, 0, 3, 4, 5])
1,153
def sample_unit(name='oz'):
    """Create and return a sample unit"""
    return Unit.objects.create(name=name)
1,154
def CreateBlendCurve2(curve0, t0, reverse0, continuity0, curve1, t1, reverse1, continuity1, multiple=False): """ Makes a curve blend between 2 curves at the parameters specified with the directions and continuities specified Args: curve0 (Curve): First curve to blend from t0 (double): Parameter on first curve for blend endpoint reverse0 (bool): If false, the blend will go in the natural direction of the curve. If true, the blend will go in the opposite direction to the curve continuity0 (BlendContinuity): Continuity for the blend at the start curve1 (Curve): Second curve to blend from t1 (double): Parameter on second curve for blend endpoint reverse1 (bool): If false, the blend will go in the natural direction of the curve. If true, the blend will go in the opposite direction to the curve continuity1 (BlendContinuity): Continuity for the blend at the end Returns: Curve: The blend curve on success. None on failure """ url = "rhino/geometry/curve/createblendcurve-curve_double_bool_blendcontinuity_curve_double_bool_blendcontinuity" if multiple: url += "?multiple=true" args = [curve0, t0, reverse0, continuity0, curve1, t1, reverse1, continuity1] if multiple: args = list(zip(curve0, t0, reverse0, continuity0, curve1, t1, reverse1, continuity1)) response = Util.ComputeFetch(url, args) response = Util.DecodeToCommonObject(response) return response
1,155
def fetch_website(url, user_agent, results_location_dir):
    """function to use for website fetch

    :param url: url to fetch information from
    :param user_agent: user agent string that is used by the minion in making the fetch
    :param results_location_dir: the location where the results are stored
    :return: results_data - a dictionary of metadata on the fetch

    This method uses a different library than the basic fetch method, Ghost.py
    (documentation at http://ghost-py.readthedocs.io/en/latest/#). After cleaning the
    url, a session is opened with the user agent string passed in. Then the specific
    web page is opened and all the resources of the web page are collected. After
    that, a screen-shot of the web page is collected. Then, the page data is written
    to a file that is named from the session id. Then each resource gathered during
    the fetch is written to a file, and these are placed in the same directory as the
    page data. Beyond that, miscellaneous metadata is written to the results_data
    dictionary.
    """
    log_debug("fetch_website", "Entering fetch_website")
    # clean the url
    url_clean = url.lstrip()
    log_debug("fetch_website", "Starting Fetch of: " + url_clean)
    # start a Ghost.py session
    session = Ghost().start(user_agent=user_agent)
    results_data = {'requested_url': url, 'actual_url': url_clean,
                    'remote_job_id': str(session.id)}
    try:
        # open the web page and gather all the page's resources
        page, resources = session.open(address=url_clean, user_agent=user_agent)
    # catch a TimeoutError
    except (ghost.TimeoutError, ghost.Error):
        results_data['connection_success'] = False
        log_debug("fetch_website", "Connection Failed for Fetch: " + url_clean)
        return results_data
    except Exception as e:
        print(type(e))
        print(str(e))
        return results_data

    # if page is None and there are no resources, the connection to the page failed
    if page is None and len(resources) == 0:
        log_debug("fetch_website", "")
        results_data['connection_success'] = False
    else:
        netloc = urlparse(url_clean).netloc
        log_debug("fetch_website", "Attempting to capture screenshot of {}".format(netloc))
        try:
            # capture a screen-shot of the web page
            session.capture_to("{}/{}.png".format(results_location_dir, netloc))
            log_debug("fetch_website", "Successful capture of screenshot of {}".format(netloc))
        except Exception as e:
            log_debug("fetch_website", "Failed to capture screenshot of {}".format(netloc))
            print(type(e))
            print(str(e))
        try:
            log_debug("fetch_website", "Opening: {}/{} for: {}".format(
                results_location_dir, session.id, url_clean))
            fetch_file = open("{}/{}".format(results_location_dir, session.id), 'w')
            log_debug("fetch_website", "writing page content to file")
            # write page content to file
            fetch_file.write(page.content)
            log_debug("fetch_website", "closing {}".format(session.id))
            fetch_file.close()
            # write the data of each resource to different files
            for resource in resources:
                log_debug("fetch_website", "opening {}/resource{} for: {}".format(
                    results_location_dir, resources.index(resource), url_clean))
                data_file = open("{}/resource{}".format(
                    results_location_dir, resources.index(resource)), "w")
                log_debug("fetch_website", "writing content to {}".format(resources.index(resource)))
                data_file.write(resource.content)
                log_debug("fetch_website", "closing {}".format(resources.index(resource)))
                data_file.close()
            results_data['fetch_object_success'] = True
        except:
            results_data['fetch_object_success'] = False
        finally:
            # collect more metadata
            results_data['connection_success'] = True
            results_data['server_info'] = dict(page.headers)
            results_data['response_code'] = page.http_status
            if page.http_status in [400, 404, 403, 401]:
                results_data["fetch_success"] = False
            if len(session.cookies) > 0:
                results_data['cookies'] = [x.value().data() for x in session.cookies]
    return results_data
1,156
def make_raster_from_images(modeladmin, request, queryset):
    """Make a raster of the selected `ImageMeta`s.

    This is an action on `ImageMeta`.
    """
    imset = make_image_set_from_images(modeladmin, request, queryset)
    return _make_raster_from_image_set(imset)
1,157
def is_rotation(first, second):
    """Given two strings, is one a rotation of the other."""
    if len(first) != len(second):
        return False
    double_second = second + second
    return first in double_second
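A couple of quick checks for is_rotation above, using made-up strings.

# Quick checks for is_rotation.
assert is_rotation("waterbottle", "erbottlewat") is True
assert is_rotation("abc", "acb") is False
assert is_rotation("abc", "ab") is False   # different lengths are never rotations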
1,158
def bin_entities(uri_set, delimiter="/", splitpos=-1):
    """ Takes iterable elements and splits them according to the position
    (splitpos) of the delimiter. The first part is used as a key, whereas the
    second is appended to a list connected to the former key.

    return: dict {key1: [id11, id12, id13, …], key2: […]}
    """
    ent_dict = dict()
    for res in uri_set:
        # split entity up to splitpos using delimiter
        entity = delimiter.join(res.split(delimiter)[:splitpos])
        # id_ is the remainder
        id_ = delimiter.join(res.split(delimiter)[splitpos:])
        if entity in ent_dict:
            ent_dict[entity].append(id_)
        else:
            ent_dict[entity] = [id_]
    return ent_dict
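A short usage sketch for bin_entities above; the URIs are made up for illustration.

# Usage sketch for bin_entities; the example.org URIs are hypothetical.
uris = [
    "http://example.org/person/alice",
    "http://example.org/person/bob",
    "http://example.org/place/berlin",
]
print(bin_entities(uris))
# {'http://example.org/person': ['alice', 'bob'], 'http://example.org/place': ['berlin']}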
1,159
def frequency(state_1, state_2):
    """
    The frequency interval between state_1 and state_2 in GHz.
    """
    return 1e-9 * interval(state_1, state_2) / h
1,160
def dac(dns_val=None) -> OrderedDict:
    """
    Domain Availability Checker (DNS lookup)

    :param dns_val: domain name string
    :return: Availability [True, False]
    """
    ip_values = None
    avail = False
    if dns_val is None:
        raise ValueError("Sorry, DNS is needed")
    if isinstance(dns_val, str) is False:
        raise TypeError("Sorry, 'DNS' must be type 'str'")
    try:
        output = dns.resolver.resolve(dns_val, 'A')
        ip_values = [ipval.to_text() for ipval in output]
    except dns.resolver.NXDOMAIN:
        avail = True
    return OrderedDict([
        ("DNS", dns_val),
        ("IP", ip_values),
        ("AVAIL", avail),
    ])
1,161
def display_timestamp(num_seconds):
    """get a string to conveniently display a timestamp"""
    seconds = num_seconds % 60
    minutes = int(num_seconds / 60) % 60
    hrs = int(num_seconds / 3600)
    return "{}:{}:{}".format(hrs, minutes, seconds)
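A quick example of the formatting produced above; note that the fields are not zero-padded.

# Example outputs for display_timestamp (fields are not zero-padded).
print(display_timestamp(3661))   # "1:1:1"
print(display_timestamp(59))     # "0:0:59"
print(display_timestamp(7325))   # "2:2:5"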
1,162
def get_bloglist(content_dict={}):
    """
    If the input command is -m, return the blog's list of articles.
    :param content_dict:
    :return:
    """
    bloglist = crawlBlog.get_archives(5)
    tousername = content_dict["FromUserName"]
    fromusername = content_dict["ToUserName"]
    return WeixinUtils.make_news(bloglist, tousername, fromusername)
1,163
def cdo_daily_means(path, file_includes):
    """
    loops through the given directory and executes "cdo dayavg *file_includes* file_out",
    appending "dayavg" to the end of the filename
    """
    for name in os.listdir(path):
        if file_includes in name and 'dayavg' not in name:
            name_new = f"{''.join(name.split('.')[:-1])}_dayavg.{name.split('.')[-1]}"
            print(f'calculating daily means for {name} to {name_new} in {path} ...')
            os.system(f'cdo dayavg {pjoin(path, name)} {pjoin(path, name_new)}')
1,164
def get_closest(arr, value):
    """
    Return the array values closest to the requested value, or +/-inf if the
    requested value is beyond the range of the array

    Parameters
    ----------
    arr : sequence
        array of values
    value : numeric

    Returns
    -------
    2-tuple: largest value in array less than value (or -inf) and smallest
        value in array larger than value (or +inf)
    """
    arr_sorted = sorted(arr)
    index = bisect(arr_sorted, value)
    lower_limit = -np.inf if index == 0 else arr_sorted[index - 1]
    upper_limit = np.inf if index == len(arr_sorted) else arr_sorted[index]
    return lower_limit, upper_limit
1,165
def filename(config, key, ext='.h5', set=''):
    """
    Get the real file name by looking up the key in the config and suffixing.

    :param key: key to use in the config
    :type key: str
    :param ext: extension to use
    :type ext: str
    :param set: set name
    :type set: str
    :return: filepath
    :rtype: str
    """
    name = config[key] + '_'
    if set:
        name += set + '_'
    name += str(config['multiplier']) + '_' + str(config['height']) + 'x' \
        + str(config['width']) + 'x' + str(config['depth'])
    if ext:
        name += ext
    return name
1,166
def XIRR( values: func_xltypes.XlArray, dates: func_xltypes.XlArray, guess: func_xltypes.XlNumber = 0.1 ) -> func_xltypes.XlNumber: """Returns the internal rate of return for a schedule of cash flows that is not necessarily periodic. https://support.microsoft.com/en-us/office/ xirr-function-de1242ec-6477-445b-b11b-a303ad9adc9d Algorithm found on stackoverflow: https://stackoverflow.com/questions/63797804/ python-irr-function-giving-different-result-than-excel-xirr From MS, Newton's method is used to optimize: https://docs.microsoft.com/en-us/office/troubleshoot/excel/ algorithm-of-xirr-funcation """ values = values.flatten(func_xltypes.Number, None) dates = dates.flatten(func_xltypes.DateTime, None) # need to cast dates and guess to Python types else optimizer complains dates = [float(date) for date in dates] guess = float(guess) # TODO: Ignore non numeric cells and boolean cells. if len(values) != len(dates): raise xlerrors.NumExcelError( f'`values` range must be the same length as `dates` range ' f'in XIRR, {len(values)} != {len(dates)}') series = pd.DataFrame({"dates": dates, "values": values}) # Filter all rows with 0 cashflows series = series[series['values'] != 0] # Sort dataframe by date series = series.sort_values('dates', ascending=True) series['values'] = series['values'].astype('float') # Create separate lists for values and dates series_values = list(series['values']) series_dates = list(series['dates']) # Calculate IRR return _xirr(series_values, series_dates, guess)
1,167
def isUp():
    """ Whether this docker container is up """
    return 'True'
1,168
def main(): """ Main program. """ ## Arguments. # VCF file path. name_file = sys.argv[1] ## Steps. # Open VCF file to read. file_in = open(name_file, 'r') # Create the lists: type_variants = [] number_variants = [] # Sum the SNPs and INDELs according to column number eight (INFO). for line in file_in: line = line.strip() if line[:2] != '##': data = line.split('\t') INFO = data[7].split(';') if INFO[0][:3] == 'DP=' and 'SNP' not in type_variants: type_variants.append('SNP') number_variants.append(1) elif INFO[0][:3] == 'DP=' and 'SNP' in type_variants: i = type_variants.index('SNP') number_variants[i] += 1 elif INFO[0][:5] == 'INDEL' and 'INDEL' not in type_variants: type_variants.append('INDEL') number_variants.append(1) elif INFO[0][:5] == 'INDEL' and 'INDEL' in type_variants: i = type_variants.index('INDEL') number_variants[i] += 1 # Print on the screen the number of SNPs and INDELs. print('## <type of variant>:<number of variants of this type>') for x in range(len(type_variants)): print(type_variants[x] + ':' + str(number_variants[x]))
1,169
def rand_perm_(img, x, y, x_max, y_max, kernel, flatten): """ Applies INPLACE the random permutation defined in `kernel` to the image `img` on the zone defined by `x`, `y`, `x_max`, `y_max` :param img: Input image of dimension (B*C*W*H) :param x: offset on x axis :param y: offset on y axis :param x_max: end of the zone to permute on the x axis :param y_max: end of the zone to permute on the y axis :param kernel: LongTensor of dim 1 containing one value for each point in the zone to permute :return: the permuted image. """ assert img.dim() == 4 if img.size(1) != 1: raise NotImplementedError('Not Implemented for multi-channel images') zone = img[:, :, x:x_max, y:y_max].contiguous() img[:, :, x:x_max, y:y_max] = zone.view(zone.size(0), -1)\ .index_select(1, kernel).view(zone.size()) return img.view(img.size(0), -1) if flatten else img
1,170
def test(model, data_loader, use_cuda, loss_func): """ The function to evaluate the testing data for the trained classifiers :param model: :param data_loader: :param use_cuda: :return: """ softmax = torch.nn.Softmax(dim=1) columns = ['participant_id', 'session_id', 'slice_id', 'true_label', 'predicted_label', 'proba0', 'proba1'] results_df = pd.DataFrame(columns=columns) total_loss = 0 if use_cuda: model.cuda() model.eval() # set the model to evaluation mode torch.cuda.empty_cache() with torch.no_grad(): for i, data in enumerate(data_loader): if use_cuda: imgs, labels = data['image'].cuda(), data['label'].cuda() else: imgs, labels = data['image'], data['label'] output = model(imgs) normalized_output = softmax(output) loss = loss_func(output, labels) total_loss += loss.item() _, predicted = torch.max(output.data, 1) # Generate detailed DataFrame for idx, sub in enumerate(data['participant_id']): row = [sub, data['session_id'][idx], data['slice_id'][idx].item(), labels[idx].item(), predicted[idx].item(), normalized_output[idx, 0].item(), normalized_output[idx, 1].item()] row_df = pd.DataFrame(np.array(row).reshape(1, -1), columns=columns) results_df = pd.concat([results_df, row_df]) del imgs, labels, output torch.cuda.empty_cache() # calculate the balanced accuracy results = evaluate_prediction(results_df.true_label.values.astype(int), results_df.predicted_label.values.astype(int)) results_df.reset_index(inplace=True, drop=True) results['total_loss'] = total_loss torch.cuda.empty_cache() return results_df, results
1,171
def is_fundamental_error(path, error): """ Returns True if error is not field related. (So type related, for example.) """ return not is_any_field_error(path, error)
1,172
def countdown(i): """Count down by 1 from i to 0.""" print(i) if i <= 0: # Base case return else: # Recursive case countdown(i-1)
1,173
def check_git_modified(clinfo):
    """Check whether the local git repositories have been modified.

    Warn if there are uncommitted changes or unpushed commits.

    Returns:
        bool: True if nothing has changed, or if the user confirmed anyway.
    """
    nip = _get_ip(
        clinfo['instance']['notebook'],
        clinfo['profile'].get('private_command')
    )
    user = clinfo['template']['notebook']['ssh_user']
    private_key = clinfo['template']['notebook']['ssh_private_key']
    git_dirs = clinfo['git_cloned_dir']
    uncmts = []
    unpushs = []
    for git_dir in git_dirs:
        cmd = "cd {} && git status --porcelain | grep '^ M.*'".format(git_dir)
        _uncmts, _ = send_instance_cmd(user, private_key, nip, cmd)
        uncmts += [os.path.join(git_dir, u) for u in _uncmts if len(u) > 0]
        cmd = "cd {} && git cherry -v".format(git_dir)
        _unpushs, _ = send_instance_cmd(user, private_key, nip, cmd)
        unpushs += [os.path.join(git_dir, u) for u in _unpushs if len(u) > 0]

    uncmt_cnt = len(uncmts)
    unpush_cnt = len(unpushs)
    if uncmt_cnt > 0 or unpush_cnt > 0:
        print()
        print("There are {} uncommitted file(s) and {} unpushed commit(s)!".
              format(uncmt_cnt, unpush_cnt))
        if uncmt_cnt > 0:
            print()
            print("Uncommitted file(s)")
            print("-------------------")
            for f in uncmts:
                print(f.strip())
        if unpush_cnt > 0:
            print()
            print("Unpushed commit(s)")
            print("-------------------")
            for f in unpushs:
                print(f.strip())
        print()
        ans = ''
        while ans.lower() not in ('y', 'n'):
            ans = input("Are you sure to destroy this cluster? (y/n): ")
        return ans.lower() == 'y'
    return True
1,174
def calculate_normalized_fitness(population):
    """

    Args:
        population (Population):

    Returns: None

    """
    # Convert scores so that a higher value means a better individual.
    scores = population.get("score")
    scores = [-s for s in scores]
    min_score = np.nanmin(scores)
    shifted_scores = [0 if np.isnan(score) else score - min_score for score in scores]
    sum_scores = sum(shifted_scores)
    if sum_scores == 0:
        print(
            "WARNING: Shifted scores are all zero, so normalizing the fitness would divide "
            "by zero. This can happen when the population only contains one individual."
        )
    for individual, shifted_score in zip(population.molecules, shifted_scores):
        individual.normalized_fitness = shifted_score / sum_scores
1,175
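# --- Illustrative sketch (not part of the original code) ---
# The shift-and-normalize step above in isolation; no Population object is
# needed and the scores are made up for illustration.
import numpy as np

scores = [-0.2, -0.5, float("nan")]                     # raw "score" values
neg = [-s for s in scores]                              # negate so higher is better
lo = np.nanmin(neg)
shifted = [0 if np.isnan(x) else x - lo for x in neg]   # NaN scores contribute nothing
total = sum(shifted)
print([x / total for x in shifted])                     # -> [0.0, 1.0, 0.0]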
def test_list_id_length_nistxml_sv_iv_list_id_length_1_1(mode, save_output, output_format): """ Type list/ID is restricted by facet length with value 5. """ assert_bindings( schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-length-1.xsd", instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-length-1-1.xml", class_name="Out", version="1.1", mode=mode, save_output=save_output, output_format=output_format, structure_style="filenames", )
1,176
def reset_trigger(trigger_name, delete_jobs):
    """ Reset the trigger to its initial state. """
    # Validate that the trigger exists.
    TriggerManager.validate_existance(trigger_name)
    if delete_jobs:
        _delete_trigger_jobs(trigger_name)
    TriggerManager.update_action_metadata(trigger_name, {})
1,177
def migrate_to_latest(json_dict, info):
    """Migrates the STAC JSON to the latest version

    Args:
        json_dict (dict): The dict of STAC JSON to identify.
        info (STACJSONDescription): The info from
            :func:`~pystac.serialization.identify.identify_stac_object` that describes
            the STAC object contained in the JSON dict.

    Returns:
        dict: A copy of the dict that is migrated to the latest version (the
        version that is pystac.STAC_VERSION)
    """
    result = deepcopy(json_dict)
    version = info.version_range.latest_valid_version()

    if version != STAC_VERSION:
        _object_migrations[info.object_type](result, version, info)

        for ext in info.common_extensions:
            _extension_migrations[ext](result, version, info)

    result['stac_version'] = STAC_VERSION

    return result
1,178
def contact_infectivity_symptomatic_20x50(): """ Real Name: b'contact infectivity symptomatic 20x50' Original Eqn: b'contacts per person symptomatic 20x50*infectivity per contact' Units: b'1/Day' Limits: (None, None) Type: component b'' """ return contacts_per_person_symptomatic_20x50() * infectivity_per_contact()
1,179
def smart_wn_search(wn, query, pos=None, report_file=None, compact=True, lang='eng', with_eng=True): """ Search synset in WordNet Gloss Corpus by term""" if report_file is None: report_file = TextReport() # Default to stdout report_file.print("Search Wordnet: Query=%s | POS=%s" % (query, pos)) with wn.ctx() as ctx: synsets = search_wn_full_text(wn, query, pos=pos, lang=lang, ctx=ctx) if with_eng and lang != 'eng': synsets_eng = SynsetCollection() for synset in synsets: synset_eng = wn.get_synset(synset.ID, lang='eng', ctx=ctx) synsets_eng.add(synset_eng) dump_synsets(synsets, synsets_eng, report_file=report_file, compact=compact) else: dump_synsets(synsets, report_file=report_file, compact=compact) return synsets
1,180
def write_conllulex_formatted_tags_to_file(prediction_file: TextIO,
                                           gold_file: TextIO,
                                           batch_tags: List[str],
                                           batch_gold_tags: List[str],
                                           batch_upos: List[str]):
    """
    Prints predicate argument predictions and gold labels for a
    single sentence to two provided file references.

    The CoNLL-U-Lex format is described in
    `the STREUSLE documentation <https://github.com/nert-nlp/streusle/blob/master/CONLLULEX.md>`_ .

    Parameters
    ----------
    prediction_file : TextIO, required.
        A file reference to print predictions to.
    gold_file : TextIO, required.
        A file reference to print gold labels to.
    batch_tags : List[str], required.
        The predicted tags.
    batch_gold_tags : List[str], required.
        The gold tags.
    batch_upos : List[str], required.
        The UPOS tags.
    """
    for predicted, gold, upos in zip(batch_tags, batch_gold_tags, batch_upos):
        # TODO add metadata: sent_id, text, streusle_sent_id, mwe
        # TODO add UD columns: ID, FORM, LEMMA, XPOS, FEATS, HEADS, DEPREL, DEPS, MISC
        # TODO add lex columns: SMWE, LEXCAT, LEXLEMMA, SS, SS2, WMWE, WCAT, WLEMMA
        print(upos, predicted, sep="\t", file=prediction_file)
        print(upos, gold, sep="\t", file=gold_file)
    print(file=prediction_file)
    print(file=gold_file)
1,181
def _parse_integrator(int_method): """parse the integrator method to pass to C""" #Pick integrator if int_method.lower() == 'rk4_c': int_method_c= 1 elif int_method.lower() == 'rk6_c': int_method_c= 2 elif int_method.lower() == 'symplec4_c': int_method_c= 3 elif int_method.lower() == 'symplec6_c': int_method_c= 4 elif int_method.lower() == 'dopr54_c': int_method_c= 5 elif int_method.lower() == 'dop853_c': int_method_c= 6 else: int_method_c= 0 return int_method_c
1,182
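# --- Illustrative sanity checks (not part of the original code) ---
# Quick checks of the name-to-code mapping implemented by _parse_integrator.
assert _parse_integrator('RK4_C') == 1       # matching is case-insensitive
assert _parse_integrator('dopr54_c') == 5
assert _parse_integrator('leapfrog') == 0    # unknown names fall back to 0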
def get_diffusion_features(repo_path, branch):
    """
    Function that extracts the first commit's diffusion features. It then
    starts a number of processes (equal to the number of cores on the
    computer), and distributes the remaining commits to them.
    """
    repo = Repository(repo_path)
    head = repo.references.get(branch)

    commits = list(
        repo.walk(head.target, GIT_SORT_TOPOLOGICAL | GIT_SORT_REVERSE))

    initial = commits[0]
    init_tree = initial.tree

    # Count the initial total lines of code
    init_total_additions = 0
    init_file_addtions = []
    init_subdirectories = 0
    init_modules = 0
    for entry in init_tree:
        if entry.type == "tree":
            added, file_additions, subdirectories = parse_tree(entry, repo)
            init_modules += 1

            init_file_addtions.extend(file_additions)
            init_total_additions += added
            init_subdirectories += subdirectories
        else:
            try:
                additions = len(str(repo[entry.id]).split('\n'))
                init_total_additions += additions
                init_file_addtions.append(additions)
            except:
                continue

    diffusion_features = []
    diffusion_features.append(initial.hex)
    diffusion_features.append(init_subdirectories)
    diffusion_features.append(init_modules)
    diffusion_features.append(
        count_entropy(init_file_addtions, init_total_additions))

    # Check how many processes can be spawned
    cpus = cpu_count()
    print("Using {} cpus...".format(cpus))

    # Divide the commits equally between the processes.
    quote, remainder = divmod(len(commits), cpus)
    processes = [
        Process(
            target=parse_diffusion_features,
            args=(i, repo_path, branch, i * quote + min(i, remainder),
                  (i + 1) * quote + min(i + 1, remainder)))
        for i in range(cpus)
    ]

    for process in processes:
        process.start()
    start_time = time.time()
    for process in processes:
        process.join()
    end_time = time.time()

    print("Done")
    print("Overall processing time {}".format(end_time - start_time))

    # Assemble the results
    features = []
    for _, feat in RES.items():
        features.extend(feat)
    features = list(reversed(features))
    features.append(diffusion_features)
    return features
1,183
def convert_example(example, tokenizer, label_list, max_seq_length=512, is_test=False):
    """
    Builds model inputs from a sequence or a pair of sequence for sequence classification tasks
    by concatenating and adding special tokens. And creates a mask from the two sequences passed
    to be used in a sequence-pair classification task.

    A BERT sequence has the following format:

    - single sequence: ``[CLS] X [SEP]``
    - pair of sequences: ``[CLS] A [SEP] B [SEP]``

    A BERT sequence pair mask has the following format:
    ::
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |

    If only one sequence, only returns the first portion of the mask (0's).

    Args:
        example(obj:`list[str]`): List of input data, containing text and label if it has a label.
        tokenizer(obj:`PretrainedTokenizer`): This tokenizer inherits from :class:`~paddlenlp.transformers.PretrainedTokenizer`
            which contains most of the methods. Users should refer to the superclass for more information regarding methods.
        label_list(obj:`list[str]`): All the labels that the data has.
        max_seq_length(obj:`int`): The maximum total input sequence length after tokenization.
            Sequences longer than this will be truncated, sequences shorter will be padded.
        is_test(obj:`False`, defaults to `False`): Whether the example contains label or not.

    Returns:
        input_ids(obj:`list[int]`): The list of token ids.
        token_type_ids(obj: `list[int]`): List of sequence pair mask.
        label(obj:`numpy.array`, data type of int64, optional): The input label if not is_test.
    """
    if is_test:
        text = example
    else:
        # Per the docstring, `example` carries both the text and its label.
        text, label = example

    encoded_inputs = tokenizer(text=text, max_seq_len=max_seq_length)
    input_ids = encoded_inputs["input_ids"]
    token_type_ids = encoded_inputs["token_type_ids"]

    if not is_test:
        # create label maps
        label_map = {}
        for (i, l) in enumerate(label_list):
            label_map[l] = i

        label = label_map[label]
        label = np.array([label], dtype="int64")
        return input_ids, token_type_ids, label
    else:
        return input_ids, token_type_ids
1,184
def load_config_file(filepath):
    """
    Load a configuration as an options dict.
    Format of the file is given with filepath extension.

    :param filepath:
    :type filepath:
    :return:
    :rtype:
    """
    if filepath.endswith('.json'):
        with open(filepath) as config_file_data:
            return json.load(config_file_data)

    if filepath.endswith('.yaml') or filepath.endswith('.yml'):
        try:
            import yaml
            with open(filepath) as config_file_data:
                # safe_load avoids executing arbitrary YAML tags
                return yaml.safe_load(config_file_data)
        except ImportError:  # pragma: no cover
            raise ConfigurationException('Configuration file extension is not supported. '
                                         'PyYAML should be installed to support "%s" file' % (
                                             filepath,))

    try:
        # Try to load input as JSON
        return json.loads(filepath)
    except:  # pylint: disable=bare-except
        pass

    raise ConfigurationException('Configuration file extension is not supported for "%s" file.'
                                 % (filepath,))
1,185
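# --- Illustrative usage sketch (not part of the original code) ---
# Load a tiny JSON config written to a temporary file; the keys are made up.
import json
import os
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as tmp:
    json.dump({'debug': True, 'workers': 4}, tmp)
    path = tmp.name

try:
    print(load_config_file(path))   # -> {'debug': True, 'workers': 4}
finally:
    os.remove(path)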
def coordinator_setup(start_heart=True): """ Sets up the client for the coordination service. URL examples for connection: zake:// file:///tmp redis://username:password@host:port mysql://username:password@host:port/dbname """ url = cfg.CONF.coordination.url lock_timeout = cfg.CONF.coordination.lock_timeout member_id = get_member_id() if url: coordinator = coordination.get_coordinator(url, member_id, lock_timeout=lock_timeout) else: # Use a no-op backend # Note: We don't use tooz to obtain a reference since for this to work we would need to # register a plugin inside setup.py entry_point and use python setup.py develop for tests # to work coordinator = NoOpDriver(member_id) coordinator.start(start_heart=start_heart) return coordinator
1,186
def _to_jraph(example): """Converts an example graph to jraph.GraphsTuple.""" example = jax.tree_map(lambda x: x._numpy(), example) # pylint: disable=protected-access edge_feat = example['edge_feat'] node_feat = example['node_feat'] edge_index = example['edge_index'] labels = example['labels'] num_nodes = example['num_nodes'] senders = edge_index[:, 0] receivers = edge_index[:, 1] return jraph.GraphsTuple( n_node=num_nodes, n_edge=np.array([len(edge_index) * 2]), nodes=node_feat, edges=np.concatenate([edge_feat, edge_feat]), # Make the edges bidirectional senders=np.concatenate([senders, receivers]), receivers=np.concatenate([receivers, senders]), # Keep the labels with the graph for batching. They will be removed # in the processed batch. globals=np.expand_dims(labels, axis=0))
1,187
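# --- Illustrative sketch (not part of the original code) ---
# The bidirectional-edge construction used in _to_jraph above, isolated with
# plain NumPy; the two-edge graph is made up for illustration.
import numpy as np

edge_index = np.array([[0, 1], [1, 2]])       # directed edges 0->1 and 1->2
senders = edge_index[:, 0]
receivers = edge_index[:, 1]
print(np.concatenate([senders, receivers]))   # [0 1 1 2]  senders, both directions
print(np.concatenate([receivers, senders]))   # [1 2 0 1]  matching receivers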
def test_cmd_dict_input_with_args(): """Single command string works with multiple args.""" cmd = get_cmd('tests/testfiles/cmds/args.sh', r'tests\testfiles\cmds\args.bat') context = Context({'a': 'one', 'b': 'two two', 'c': 'three', 'd': cmd, 'cmd': { 'run': '{d} {a} "{b}" {c}'}}) pypyr.steps.cmd.run_step(context) assert 'cmdOut' not in context
1,188
def get_urls(page_links):
    """Given the page links, return the list of JSON URL addresses."""
    urls = []
    for link in page_links:
        link1 = link.replace('v3', 'VV')
        # keep only the digits that remain, i.e. the game id
        game_id = ''.join(char for char in link1 if char in '0123456789')
        json_url = f'http://www.afa.com.ar/deposito/html/v3/htmlCenter/data/deportes/futbol/primeraa/events/{game_id}.json'
        urls.append(json_url)
    return urls
1,189
def game_over(username): """When the user have lost the game. Args: username: A string representing the username. """ print os.linesep + "Game Over!!! " + username + ", I am sorry! Better luck next time! :-)" + os.linesep if raw_input(username + " would you like to play again? (y/n) ").lower() == 'y': game_loop(username) else: print username + ", thank you for playing, see you soon!" + os.linesep exit()
1,190
def KK_RC79_fit(params, w, t_values):
    """
    Kramers-Kronig Function: -RC-

    Kristian B. Knudsen ([email protected] / [email protected])
    """
    # Sum the 79 identical RC elements in a loop instead of spelling out
    # R1 ... R79 by hand; the result is numerically identical.
    Rs = params["Rs"]
    return Rs + sum(
        params["R{}".format(k)] / (1 + w * 1j * t_values[k - 1])
        for k in range(1, 80)
    )
1,191
def _resampling_from_str(resampling: str) -> Resampling: """ Match a rio.warp.Resampling enum from a string representation. :param resampling: A case-sensitive string matching the resampling enum (e.g. 'cubic_spline') :raises ValueError: If no matching Resampling enum was found. :returns: A rio.warp.Resampling enum that matches the given string. """ # Try to match the string version of the resampling method with a rio Resampling enum name for method in rio.warp.Resampling: if str(method).replace("Resampling.", "") == resampling: resampling_method = method break # If no match was found, raise an error. else: raise ValueError( f"'{resampling}' is not a valid rasterio.warp.Resampling method. " f"Valid methods: {[str(method).replace('Resampling.', '') for method in rio.warp.Resampling]}" ) return resampling_method
1,192
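# --- Illustrative usage sketch (not part of the original code) ---
# Exercising _resampling_from_str above; requires rasterio. The happy path
# would be e.g. _resampling_from_str("bilinear") -> Resampling.bilinear,
# while an unknown name raises the ValueError shown below.
try:
    _resampling_from_str("not_a_method")
except ValueError as err:
    print(err)   # message lists the valid resampling names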
def _call_twitter_api(query): """helper function to call twitter api Args: query (str): query string made by _preprocess_query function Returns: generator: response object in generator """ return sntwitter.TwitterSearchScraper(query=query).get_items()
1,193
def parse_superfamilies(filepath: str) -> List[Method]: """ Parse the CathNames.txt file distributed with CATH-Gene3D releases :param filepath: :return: """ signatures = [] reg = re.compile(r"^(\d\.\d+\.\d+\.\d+)\s+([a-zA-Z0-9]+)\s+:(.*)$") with open(filepath, "rt") as fh: for line in fh: if line[0] == '#': continue m = reg.match(line) if m is None: continue supfam, model, name = m.groups() accession = f"{_PREFIX}{supfam}" m = Method(accession, _TYPE_SUPFAM, description=name) signatures.append(m) return signatures
1,194
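# --- Illustrative sketch (not part of the original code) ---
# A line shaped like the CathNames.txt entries the regex above expects; the
# domain id and name are illustrative, not taken from a real release.
import re

reg = re.compile(r"^(\d\.\d+\.\d+\.\d+)\s+([a-zA-Z0-9]+)\s+:(.*)$")
line = "1.10.8.10    1muwA01    :DNA helicase RuvA subunit, C-terminal domain"
print(reg.match(line).groups())
# -> ('1.10.8.10', '1muwA01', 'DNA helicase RuvA subunit, C-terminal domain')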
def on_click(event):
    """
    On left-click (the bound event), re-enable the 'disabled' entry
    and delete the placeholder text
    """
    entry.configure(state=NORMAL)
    entry.delete(0, END)

    # make the callback only work once
    entry.unbind('<Button-1>', click)
1,195
def Weekday(datetime): """Returns a weekday for display e.g. Mon.""" return datetime.strftime('%a')
1,196
def test_get_current_week_number_returns_1_on_first_week_of_semester( current_date: datetime, ): """ Assumes a semester ends at week 22 (included), which has been the case so far. Any date within the first week should return 1, EXCLUDING the following Sunday, on which day the script updates in advance for the following week. Args: current_date (datetime): Receive a datetime object from Hypothesis. """ next_semester_start_date = datetime(2021, 10, 11) # Monday result = update_week.get_current_week_number( current_date, next_semester_start_date ) assert result == 1
1,197
def ExposeXcresult(xcresult_path, output_path): """Exposes the files from xcresult. The files includes the diagnostics files and attachments files. Args: xcresult_path: string, path of xcresult bundle. output_path: string, path of output directory. """ root_result_bundle = _GetResultBundleObject(xcresult_path, bundle_id=None) actions = root_result_bundle['actions']['_values'] action_result = None for action in actions: if action['_type']['_name'] == 'ActionRecord': action_result = action['actionResult'] break if action_result is None: raise ios_errors.XcresultError( 'Failed to get "ActionResult" from result bundle %s' % root_result_bundle) _ExposeDiagnostics(xcresult_path, output_path, action_result) _ExposeAttachments(xcresult_path, output_path, action_result)
1,198
def host_list(request): """List all code hosts :rtype: json """ hosts = Host.objects.all() serializer = host_serializer(hosts, many=True) return JsonResponse(serializer.data, safe=False)
1,199