def _makeSSDF(row, minEvents):
    """
    Function to change form of TRDF for subspace creation
    """
    index = range(len(row.Clust))
    columns = [x for x in row.index if x != 'Clust']
    DF = pd.DataFrame(index=index, columns=columns)
    DF['Name'] = ['SS%d' % x for x in range(len(DF))]  # name subspaces
    # Initialize columns for future use
    DF['Events'] = object
    DF['AlignedTD'] = object
    DF['SVD'] = object
    DF['UsedSVDKeys'] = object
    DF['FracEnergy'] = object
    DF['SVDdefined'] = False
    DF['SampleTrims'] = [{} for x in range(len(DF))]
    DF['Threshold'] = float
    DF['SigDimRep'] = object
    DF['FAS'] = object
    DF['NumBasis'] = int
    DF['Offsets'] = object
    DF['Stats'] = object
    DF['MPtd'] = object
    DF['MPfd'] = object
    DF['Channels'] = object
    DF['Station'] = row.Station
    DF = DF.astype(object)
    for ind, row2 in DF.iterrows():
        evelist = row.Clust[ind]
        evelist.sort()
        DF['Events'][ind] = evelist
        DF['numEvents'][ind] = len(evelist)
        DF['MPtd'][ind] = _trimDict(row, 'MPtd', evelist)
        DF['MPfd'][ind] = _trimDict(row, 'MPfd', evelist)
        DF['Stats'][ind] = _trimDict(row, 'Stats', evelist)
        DF['Channels'][ind] = _trimDict(row, 'Channels', evelist)
    # only keep subspaces that meet the minimum requirement, don't renumber
    DF = DF[[len(x) >= minEvents for x in DF.Events]]
    # DF.reset_index(drop=True, inplace=True)
    return DF
1,800
def save_checkpoint(logdir, epoch, global_step, model, optimizer):
    """Saves the training state into the given log dir path."""
    checkpoint_file_name = os.path.join(logdir, 'step-%03dK.pth' % (global_step // 1000))
    print("saving the checkpoint file '%s'..." % checkpoint_file_name)
    checkpoint = {
        'epoch': epoch + 1,
        'global_step': global_step,
        'state_dict': model.state_dict(),
        'optimizer': optimizer.state_dict(),
    }
    torch.save(checkpoint, checkpoint_file_name)
    del checkpoint
1,801
def concatenate_constraints(original_set, additional_set):
    """
    Method for concatenating sets of linear constraints.
    original_set and additional_set are both tuples of the form (C, b, n_eq).
    Output is a concatenated tuple of the same form.
    All equality constraints are always kept on top.
    """
    C_org, b_org, n_org = original_set
    C_add, b_add, n_add = additional_set

    if n_add > 0:
        C_out = np.insert(C_org, n_org, C_add[:n_add, :], axis=0)
        C_out = np.concatenate((C_out, C_add[n_add:, :]))
        b_out = np.insert(b_org, n_org, b_add[:n_add])
        b_out = np.concatenate((b_out, b_add[n_add:]))
    else:
        C_out = np.concatenate((C_org, C_add))
        b_out = np.concatenate((b_org, b_add))

    n_out = n_org + n_add

    return C_out, b_out, n_out
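A minimal usage sketch with hypothetical values (assuming numpy is imported as np): each set carries one equality constraint on top of one inequality, and the merged set keeps both equalities first.

set_a = (np.array([[1., 0.], [0., 1.]]), np.array([1., 2.]), 1)  # one equality, one inequality
set_b = (np.array([[0., 1.], [1., 1.]]), np.array([3., 4.]), 1)  # one equality, one inequality
C, b, n_eq = concatenate_constraints(set_a, set_b)
# Row order is now: equality of set_a, equality of set_b, inequality of set_a, inequality of set_b
print(C, b, n_eq)  # n_eq == 2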
1,802
def _isDefaultHandler():
    """
    Determine whether the I{SIGCHLD} handler is the default or not.
    """
    return signal.getsignal(signal.SIGCHLD) == signal.SIG_DFL
1,803
def downsampling(conversion_rate, data, fs):
    """
    Perform downsampling.
    Inputs: the conversion rate, the data, and the sampling frequency.
    Returns the downsampled data and its sampling frequency.
    """
    # Determine how many samples to skip when decimating
    decimationSampleNum = conversion_rate - 1
    # Prepare the FIR filter
    nyqF = (fs / conversion_rate) / 2.0              # Nyquist frequency after conversion
    cF = (fs / conversion_rate / 2.0 - 500.) / nyqF  # cutoff frequency (slightly below the Nyquist frequency after conversion)
    taps = 511                                       # number of filter taps (must be odd)
    b = scipy.signal.firwin(taps, cF)                # prepare the low-pass filter
    # Filtering
    data = scipy.signal.lfilter(b, 1, data)
    # Decimation
    downData = []
    for i in range(0, len(data), decimationSampleNum + 1):
        downData.append(data[i])
    return (downData, fs / conversion_rate)
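A usage sketch under the assumption that numpy and scipy.signal are available: a 440 Hz tone sampled at 44.1 kHz is decimated by a factor of 2.

import numpy as np
import scipy.signal

fs = 44100
t = np.arange(0, 1.0, 1.0 / fs)
tone = np.sin(2 * np.pi * 440 * t)
down, new_fs = downsampling(2, tone, fs)
print(len(tone), len(down), new_fs)  # 44100 22050 22050.0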
1,804
def get_client_cache_key( request_or_attempt: Union[HttpRequest, AccessBase], credentials: dict = None ) -> str: """ Build cache key name from request or AccessAttempt object. :param request_or_attempt: HttpRequest or AccessAttempt object :param credentials: credentials containing user information :return cache_key: Hash key that is usable for Django cache backends """ if isinstance(request_or_attempt, AccessBase): username = request_or_attempt.username ip_address = request_or_attempt.ip_address user_agent = request_or_attempt.user_agent else: username = get_client_username(request_or_attempt, credentials) ip_address = get_client_ip_address(request_or_attempt) user_agent = get_client_user_agent(request_or_attempt) filter_kwargs_list = get_client_parameters(username, ip_address, user_agent) return make_cache_key_list(filter_kwargs_list)
1,805
def test_empty_schema() -> None: """Test that SchemaModel supports empty schemas.""" empty_schema = pa.DataFrameSchema() class EmptySchema(pa.SchemaModel): pass assert empty_schema == EmptySchema.to_schema() class Schema(pa.SchemaModel): a: Series[int] class EmptySubSchema(Schema): pass schema = pa.DataFrameSchema({"a": pa.Column(int)}) assert schema == EmptySubSchema.to_schema() class EmptyParentSchema(EmptySchema): a: Series[int] assert schema == EmptyParentSchema.to_schema()
1,806
def loadMaterials(matFile):
    """
    Loads materials into Tom's code from an external file of all applicable
    materials. These are returned as a dictionary.
    """
    mats = {}
    name, no, ne, lto, lte, mtype = np.loadtxt(matFile, dtype=str, unpack=True)
    no = np.array(list(map(float, no)))
    ne = np.array(list(map(float, ne)))
    lto = 1.0e-4 * np.array(list(map(float, lto)))
    lte = 1.0e-4 * np.array(list(map(float, lte)))
    for (i, n) in enumerate(name):
        mats[n] = tm.material(no[i], ne[i], lto[i], lte[i], n, mtype[i])
    return mats
1,807
def prepend_with_baseurl(files, base_url):
    """prepend url to beginning of each file

    Parameters
    ------
    files (list): list of files
    base_url (str): base url

    Returns
    ------
    list: a list of files with base url pre-pended
    """
    return [base_url + file for file in files]
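A quick usage sketch with hypothetical file names and base URL:

prepend_with_baseurl(["a.csv", "b.csv"], "https://example.com/data/")
# ['https://example.com/data/a.csv', 'https://example.com/data/b.csv']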
1,808
def streamplot( x, y, u, v, p=None, density=1, color="#1f77b4", line_width=None, alpha=1, arrow_size=7, min_length=0.1, start_points=None, max_length=4.0, integration_direction="both", arrow_level="underlay", **kwargs, ): """Draws streamlines of a vector field. Parameters ---------- x, y : 1d arrays an evenly spaced grid. u, v : 2d arrays x and y-velocities. Number of rows should match length of y, and the number of columns should match x. p : bokeh.plotting.Figure instance, default None Figure to populate with glyphs. If None, create a new figure. density : float or 2-tuple Controls the closeness of streamlines. When `density = 1`, the domain is divided into a 30x30 grid---density linearly scales this grid. Each cell in the grid can have, at most, one traversing streamline. For different densities in each direction, use [density_x, density_y]. color : str or 2d array, default '#1f77b4' (Bokeh default color) Streamline color. When given an array with the same shape as velocities, color values are converted to colors using cmap. line_width : numeric or 2d array, default None vary linewidth when given a 2d array with the same shape as velocities. If None, scale linewidth with speed. arrow_size : float Factor scale arrow size. min_length : float Minimum length of streamline in axes coordinates. start_points: Nx2 array Coordinates of starting points for the streamlines. In data coordinates, the same as the ``x`` and ``y`` arrays. max_length : float Maximum length of streamline in axes coordinates. integration_direction : ['forward', 'backward', 'both'] Integrate the streamline in forward, backward or both directions. arrow_level : str Either 'underlay' or 'overlay'. kwargs : All other kwargs are passed to bokeh.plotting.figure() when generating the figure. Returns ------- bokeh.plotting.Figure instance populated with streamplot. Notes ----- .. Adapted from matplotlib.streamplot.streamplot.py. """ if p is None: p = _baseplot(p, **kwargs) # Ensure plot fits stream lines p.x_range = bokeh.models.Range1d(x[0], x[-1]) p.y_range = bokeh.models.Range1d(y[0], y[-1]) if line_width is None: # Compute speed speed = np.sqrt(u ** 2 + v ** 2) # Make linewidths proportional to speed, with min width 0.5 and max 3 line_width = 0.5 + 2.5 * speed / speed.max() xs, ys, line_widths, arrowtails, arrowheads = _streamlines( x, y, u, v, density=density, line_width=line_width, min_length=min_length, start_points=start_points, max_length=max_length, integration_direction=integration_direction, ) def _draw_arrows(): for tail, head in zip(arrowtails, arrowheads): p.add_layout( bokeh.models.Arrow( line_alpha=0, end=bokeh.models.NormalHead(fill_color=color, line_alpha=0, size=7), x_start=tail[0], y_start=tail[1], x_end=head[0], y_end=head[1], ) ) if arrow_level == "underlay": _draw_arrows() p.multi_line(xs, ys, color=color, line_width=line_widths, line_alpha=alpha) else: p.multi_line(xs, ys, color=color, line_width=line_widths, line_alpha=alpha) _draw_arrows() return p
1,809
def wait_for_workspace_to_start(self, *, workspace_pk): """Checks if the workspace is up for up to 10 minutes.""" workspace = Workspace.objects.get(pk=workspace_pk) if workspace.status != WorkspaceStatus.PENDING: # Nothing to do return with requests.Session() as s: _authorise(client=s, auth=workspace.user.workbench_token) instance = _get_workspace( s, workspace_id=workspace.service_workbench_id ) if instance["status"] == WorkspaceStatus.PENDING: # Raises celery.exceptions.Retry self.retry(countdown=30) # TODO catch MaxRetriesExceeded? else: workspace.status = instance["status"] workspace.full_clean() workspace.save()
1,810
def _loc(df, start, stop, include_right_boundary=True): """ >>> df = pd.DataFrame({'x': [10, 20, 30, 40, 50]}, index=[1, 2, 2, 3, 4]) >>> _loc(df, 2, None) x 2 20 2 30 3 40 4 50 >>> _loc(df, 1, 3) x 1 10 2 20 2 30 3 40 >>> _loc(df, 1, 3, include_right_boundary=False) x 1 10 2 20 2 30 """ result = df.loc[start:stop] if not include_right_boundary: right_index = result.index.get_slice_bound(stop, 'left', 'loc') result = result.iloc[:right_index] return result
1,811
def test_legacy_yaml(tmpdir, install_mockery, mock_packages): """Tests a simple legacy YAML with a dependency and ensures spec survives concretization.""" yaml = """ spec: - a: version: '2.0' arch: platform: linux platform_os: rhel7 target: x86_64 compiler: name: gcc version: 8.3.0 namespace: builtin.mock parameters: bvv: true foo: - bar foobar: bar cflags: [] cppflags: [] cxxflags: [] fflags: [] ldflags: [] ldlibs: [] dependencies: b: hash: iaapywazxgetn6gfv2cfba353qzzqvhn type: - build - link hash: obokmcsn3hljztrmctbscmqjs3xclazz full_hash: avrk2tqsnzxeabmxa6r776uq7qbpeufv build_hash: obokmcsn3hljztrmctbscmqjs3xclazy - b: version: '1.0' arch: platform: linux platform_os: rhel7 target: x86_64 compiler: name: gcc version: 8.3.0 namespace: builtin.mock parameters: cflags: [] cppflags: [] cxxflags: [] fflags: [] ldflags: [] ldlibs: [] hash: iaapywazxgetn6gfv2cfba353qzzqvhn full_hash: qvsxvlmjaothtpjluqijv7qfnni3kyyg build_hash: iaapywazxgetn6gfv2cfba353qzzqvhy """ spec = Spec.from_yaml(yaml) concrete_spec = spec.concretized() assert concrete_spec.eq_dag(spec)
1,812
def compare_system_and_attributes_faulty_systems(self): """compare systems and associated attributes""" # compare - systems / attributes self.assertTrue(System.objects.filter(system_name='system_csv_31_001').exists()) self.assertTrue(System.objects.filter(system_name='system_csv_31_003').exists()) self.assertTrue(System.objects.filter(system_name='system_csv_31_006').exists()) # compare - systems / attributes self.assertEqual( System.objects.get(system_name='system_csv_31_001').analysisstatus, Analysisstatus.objects.get(analysisstatus_name='analysisstatus_1'), ) self.assertEqual( System.objects.get(system_name='system_csv_31_003').analysisstatus, Analysisstatus.objects.get(analysisstatus_name='analysisstatus_1'), ) self.assertEqual( System.objects.get(system_name='system_csv_31_006').analysisstatus, Analysisstatus.objects.get(analysisstatus_name='analysisstatus_1'), ) self.assertEqual( System.objects.get(system_name='system_csv_31_001').systemstatus, Systemstatus.objects.get(systemstatus_name='systemstatus_1'), ) self.assertEqual( System.objects.get(system_name='system_csv_31_003').systemstatus, Systemstatus.objects.get(systemstatus_name='systemstatus_1'), ) self.assertEqual( System.objects.get(system_name='system_csv_31_006').systemstatus, Systemstatus.objects.get(systemstatus_name='systemstatus_1'), ) # return to test function return self
1,813
def editPostgresConf():
    """
    edit /var/lib/pgsql/data/postgresql.conf and change
    max_connections to 150
    """
    try:
        tempFile = tempfile.mktemp(dir="/tmp")
        logging.debug("copying %s over %s", basedefs.FILE_PSQL_CONF, tempFile)
        shutil.copy2(basedefs.FILE_PSQL_CONF, tempFile)
        handler = utils.TextConfigFileHandler(tempFile)
        handler.open()
        handler.editParam("max_connections", basedefs.CONST_MAX_PSQL_CONNS)
        handler.close()
        logging.debug("copying temp file over original file")
        shutil.copy2(tempFile, basedefs.FILE_PSQL_CONF)
        logging.debug("setting permissions & file ownership")
        os.chown(basedefs.FILE_PSQL_CONF, utils.getUsernameId("postgres"), utils.getGroupId("postgres"))
        os.chmod(basedefs.FILE_PSQL_CONF, 0o600)
        logging.debug("removing temporary file")
        os.remove(tempFile)
    except:
        logging.error("Failed editing %s" % basedefs.FILE_PSQL_CONF)
        logging.error(traceback.format_exc())
        raise Exception(output_messages.ERR_EXP_EDIT_PSQL_CONF)
1,814
def get_regions(contig,enzymes): """return loci with start and end locations""" out_sites = [] enz_1 = [enz for enz in Restriction.AllEnzymes if "%s"%enz == enzymes[0]][0] enz_2 = [enz for enz in Restriction.AllEnzymes if "%s"%enz == enzymes[1]][0] enz_1_sites = enz_1.search(contig.seq) enz_2_sites = enz_2.search(contig.seq) combined_sites = sorted(enz_1_sites + enz_2_sites) for i in range(len(combined_sites)): site_A = combined_sites[i] try: site_B = combined_sites[i+1] except IndexError: break if site_B - site_A < 30: continue if site_A in enz_1_sites and site_B in enz_2_sites: out_sites.append((site_A + 1, site_B - len(enz_2.site))) elif site_A in enz_2_sites and site_B in enz_1_sites: out_sites.append((site_A + 1, site_B - len(enz_1.site))) return out_sites
1,815
def getHighContrast(j17, j18, d17, d18):
    """
    contrast enhancement through stacking
    """
    summer = j17 + j18
    summer = summer / np.amax(summer)
    winter = d17 + d18
    winter = winter / np.amax(winter)
    diff = winter * summer
    return diff
1,816
def get_bounding_box(dataframe, dataIdentifier):
    """Returns the rectangle in a format (min_lat, max_lat, min_lon, max_lon)
    which bounds all the points of the ``dataframe``.

    Parameters
    ----------
    dataframe : pandas.DataFrame
        the dataframe with the data
    dataIdentifier : DataIdentifier
        the identifier of the dataframe to be used
    """
    b_box = (getattr(dataframe, dataIdentifier.latitude).min(),
             getattr(dataframe, dataIdentifier.latitude).max(),
             getattr(dataframe, dataIdentifier.longitude).min(),
             getattr(dataframe, dataIdentifier.longitude).max())
    return b_box
1,817
def get_file_download_response(dbfile):
    """
    Create the HttpResponse for serving a file.
    The file is not read or output here - instead, by setting the
    `X-Accel-Redirect` header, the web server (nginx) directly serves the file.
    """
    mimetype = dbfile.mimeType
    response = HttpResponse(content_type=mimetype)
    response["Content-Disposition"] = "inline; filename={0}".format(
        to_safe_name(dbfile.name)
    )
    response['X-Accel-Redirect'] = "/{0}".format(dbfile.path)
    return response
1,818
def keyWait(): """Waits until the user presses a key. Then returns a L{KeyDown} event. Key events will repeat if held down. A click to close the window will be converted into an Alt+F4 KeyDown event. @rtype: L{KeyDown} """ while 1: for event in get(): if event.type == 'KEYDOWN': return event if event.type == 'QUIT': # convert QUIT into alt+F4 return KeyDown('F4', '', True, False, True, False, False) time.sleep(.001)
1,819
def create_project( org_id: str, api_key: str, url: str, label_type: LabelType, taxonomy: str = "DEFAULT::Berkeley Deep Drive (BDD)", reviews: int = 1, ) -> Generator[Tuple[str, RBProject], None, None]: """Create project.""" profile = datetime.strftime(datetime.now(), "%Y%m%d%H%M%S%f") + str( random.randint(0, 1000) ) project_name = f"cli-{profile}" home_dir = os.path.expanduser("~") project_dir = os.path.join(home_dir, project_name) project: Optional[RBProject] = None try: os.makedirs(project_dir) subprocess.run( [ "redbrick", "config", "add", "-o", org_id, "-k", api_key, "-u", url, "-p", profile, ], check=True, ) subprocess.run(["redbrick", "config", "set", profile], check=True) subprocess.run( [ "redbrick", "init", "-n", project_name, "-t", taxonomy, "-l", label_type.value, "-r", str(reviews), project_dir, ], check=True, ) cache_name = os.listdir(os.path.join(project_dir, ".redbrick", "cache")) with open( os.path.join( project_dir, ".redbrick", "cache", cache_name[0], "project.pickle" ), "rb", ) as file_: project = pickle.load(file_) assert project yield project_dir, project finally: os.chdir(home_dir) shutil.rmtree(project_dir, ignore_errors=True) if project: project.context.project.delete_project(project.org_id, project.project_id) if os.environ.get("REDBRICK_SDK_SOURCE") == "GITHUB": subprocess.run(["redbrick", "config", "clear"], check=True) else: subprocess.run(["redbrick", "config", "remove", profile], check=True)
1,820
def create_comentarios_instancia(id_instancia):
    """
    @return "ok" if the operation was carried out successfully
    @except status 500 if an error occurs
    """
    from datetime import datetime
    if request.method == 'POST':
        try:
            values = json.loads(request.data.decode('8859'))
            mensaje = values['com_mensaje']
            autor = values['com_usuario']
            fecha = datetime.today()
            comentario = comentarios_instancia_curso(instancias_curso_id=id_instancia,
                                                     mensaje=mensaje,
                                                     autor=autor,
                                                     fecha=fecha)
            session.add(comentario)
            session.commit()
        except Exception:
            session.rollback()
            return "Operacion No se pudo llevar a cabo", 500
        return "ok"
    else:
        return "Operacion No se pudo llevar a cabo", 500
1,821
def callEvalGen(args: argparse.Namespace): """ Method for evaluation of the keywords generation task on the Inspec dataset. :param args: User arguments. :type args: argparse.Namespace """ return evalOn(args, "uncontr")
1,822
async def osfrog(msg, mobj): """ Patch 7.02: help string was removed from Captain's Mode """ osfrogs = [ "Added Monkey King to the game", "Reduced Lone Druid's respawn talent -50s to -40s", ] return await client.send_message(mobj.channel, choice(osfrogs))
1,823
def _add_normalizing_vector_point(mesh, minpt, maxpt): """ This function allows you to visualize all meshes in their size relative to each other It is a quick simple hack: by adding 2 vector points at the same x coordinates at the extreme left and extreme right of the largest .stl mesh, all the meshes are displayed with the same scale. input: [mesh], minpoint coordinates, maxpoint coordinates output: [mesh] with 2 added coordinate points """ newmesh = Mesh(np.zeros(mesh.vectors.shape[0]+2, dtype=Mesh.dtype)) # newmesh.vectors = np.vstack([mesh.vectors, # np.array([ [[0,maxpt,0], [0,maxpt,0], [0,maxpt,0]], # [[0,minpt,0], [0,minpt,0], [0,minpt,0]] ], float) ]) newmesh.vectors = np.vstack([mesh.vectors, np.array([ [[0,0,maxpt], [0,0,maxpt], [0,0,maxpt]], [[0,0,minpt], [0,0,minpt], [0,0,minpt]] ], float) ]) return newmesh
1,824
def run():
    """Function to start the Flask app."""
    host = os.getenv("HOST") or '127.0.0.1'
    app.run(host=host)
1,825
def radii_ratio(collection):
    """
    The Flaherty & Crumplin (1992) index, OS_3 in Altman (1998).

    The ratio of the radius of the equi-areal circle to the radius of the MBC.
    """
    ga = _cast(collection)
    r_eac = numpy.sqrt(pygeos.area(ga) / numpy.pi)
    r_mbc = pygeos.minimum_bounding_radius(ga)
    return r_eac / r_mbc
1,826
def create_jwt(project_id, private_key_file, algorithm): """Create a JWT (https://jwt.io) to establish an MQTT connection.""" token = { 'iat': datetime.datetime.utcnow(), 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=60), 'aud': project_id } with open(private_key_file, 'r') as f: private_key = f.read() print('Creating JWT using {} from private key file {}'.format(algorithm, private_key_file)) return jwt.encode(token, private_key, algorithm=algorithm)
1,827
def update_analytics(node, file, version_idx, action='download'):
    """
    :param Node node: Root node to update
    :param str file: The _id field of a filenode
    :param int version_idx: Zero-based version index
    :param str action: Whether this is logged as a download or a view
    """
    # Pass in contributors and group members to check that their downloads
    # do not count towards total download count
    contributors = []
    if getattr(node, 'contributors_and_group_members', None):
        contributors = node.contributors_and_group_members
    elif getattr(node, 'contributors', None):
        contributors = node.contributors

    node_info = {
        'contributors': contributors
    }
    resource = node.guids.first()

    update_counter(resource, file, version=None, action=action, node_info=node_info)
    update_counter(resource, file, version_idx, action, node_info=node_info)
1,828
def games(engine1, engine2, number_of_games):
    """Let engine1 and engine2 play several games against each other.
    Each engine begins every second game."""
    engine1_wins = 0
    engine2_wins = 0
    draws = 0
    for n in range(number_of_games):
        if n % 2:
            result = game(engine1, engine2, True)
        else:
            result = game(engine1, engine2, False)
        if result == "engine1":
            engine1_wins += 1
        elif result == "engine2":
            engine2_wins += 1
        else:
            draws += 1
    return ("engine1 wins: " + str(engine1_wins) +
            " engine2 wins: " + str(engine2_wins) +
            " draws: " + str(draws))
1,829
def tissue2line(data, line=None): """tissue2line Project tissue probability maps to the line by calculating the probability of each tissue type in each voxel of the 16x720 beam and then average these to get a 1x720 line. Discrete tissues are assigned by means of the highest probability of a particular tissue type. Parameters ---------- data: list,numpy.ndarray,str for tissue data: list of three numpy array/nifti images/strings describing the probability of white matter/gray matter and CSF line: str,nibabel.Nifti1Image,numpy.ndarray used for the direction of the line and should have the same dimensions as `data`. Generally this is the output from create_line_from_slice Returns ---------- numpy.ndarray (1,720) array of your `data` in the line """ # load in reference line data if isinstance(line, str): ref = nb.load(line).get_fdata() elif isinstance(line, nb.Nifti1Image): ref = line.get_fdata() elif isinstance(line, np.ndarray): ref = line else: raise ValueError("Unknown input type for line; should be a string, nifti-image, or numpy array") if isinstance(data, list): # we have receive a list, assuming tissue probability maps. if len(data) > 3: raise ValueError(f'Data contains {len(data)} items, this should be three: 1) WM prob, 2) GM prob, 3) CSF prob') if isinstance(data[0], str): input = [nb.load(i).get_fdata() for i in data] elif isinstance(data[0], nb.Nifti1Image): input = [i.get_fdata() for i in data] elif isinstance(data[0], np.ndarray): input = data # remove existing 4th dimension input = [np.squeeze(i, axis=3) for i in input if len(i.shape) == 4] for i in input: if i.shape != ref.shape: raise ValueError(f"Dimensions of line [{ref.shape}] do not match dimension of input seg [{i.shape}]") # put wm/gm/csf in three channels of a numpy array prob_stack = np.dstack([input[0],input[1],input[2]]) prob_stack_avg = np.average(prob_stack, axis=1) # normalize averages between 0-1 scaler = MinMaxScaler() scaler.fit(prob_stack_avg) avg_norm = scaler.transform(prob_stack_avg) output = [] lut = {'wm':2,'gm':1,'csf':0} # avg_norm has 3 columns; 1st = WM, 2nd = GM, 3rd = CSF for i,r in enumerate(avg_norm): max_val = np.amax(r) # check tissue type only if non-zero value. If all probabilities are 0 is should be set to zero regardless if max_val == 0: output.append(lut['csf']) else: # make list of each row for nicer indexing idx = list(r).index(max_val) if idx == 0: # type = 'wm' = '1' in nighres segmentation output.append(lut['wm']) elif idx == 1: # type = 'gm' = '2' in nighres segmentation output.append(lut['gm']) elif idx == 2: # type = 'csf' = '0' in nighres segmentation output.append(lut['csf']) output = np.array(output)[:,np.newaxis] return output
1,830
def get_version(pyngrok_config=None): """ Get a tuple with the ``ngrok`` and ``pyngrok`` versions. :param pyngrok_config: A ``pyngrok`` configuration to use when interacting with the ``ngrok`` binary, overriding :func:`~pyngrok.conf.get_default()`. :type pyngrok_config: PyngrokConfig, optional :return: A tuple of ``(ngrok_version, pyngrok_version)``. :rtype: tuple """ if pyngrok_config is None: pyngrok_config = conf.get_default() ngrok_version = process.capture_run_process(pyngrok_config.ngrok_path, ["--version"]).split("version ")[1] return ngrok_version, __version__
1,831
def stat_cleaner(stat: str) -> int:
    """Cleans and converts single stat.

    Used for the tweets, followers, following, and likes count sections.

    Args:
        stat: Stat to be cleaned.

    Returns:
        A stat with commas removed and converted to int.
    """
    return int(stat.replace(",", ""))
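A quick usage sketch:

print(stat_cleaner("12,345"))      # 12345
print(stat_cleaner("1,234,567"))   # 1234567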
1,832
def load_image_ids(img_root, split_dir): """images in the same directory are in the same split""" pathXid = [] img_root = os.path.join(img_root, split_dir) for name in os.listdir(img_root): idx = name.split(".")[0] pathXid.append( ( os.path.join(img_root, name), idx)) if split_dir == 'val2014': print("Place the features of minival in the front of val2014 tsv.") # Put the features of 5000 minival images in front. minival_img_ids = set(json.load(open('data/mscoco_imgfeat/coco_minival_img_ids.json'))) a, b = [], [] for item in pathXid: img_id = item[1] if img_id in minival_img_ids: a.append(item) else: b.append(item) assert len(a) == 5000 assert len(a) + len(b) == len(pathXid) pathXid = a + b assert len(pathXid) == 40504 return pathXid
1,833
def do(ARGV):
    """Allows checking whether the exception handlers are all in place.
    """
    if len(ARGV) != 3:
        return False
    elif ARGV[1] != "<<TEST:Exceptions/function>>" \
            and ARGV[1] != "<<TEST:Exceptions/on-import>>":
        return False

    if len(ARGV) < 3:
        return False

    exception = ARGV[2]
    if exception == "KeyboardInterrupt":
        raise KeyboardInterrupt()
    elif exception == "AssertionError":
        raise AssertionError()
    elif exception == "Exception":
        raise Exception()

    # If we did not raise an exception here, we didn't do anything
    print("No exception was triggered.")
    return False
1,834
def get_available_language_packs():
    """Get list of registered language packs.

    :return list:
    """
    ensure_autodiscover()
    return [val for (key, val) in registry.registry.items()]
1,835
def _plot_topo_onpick(event, show_func=None, colorbar=False): """Onpick callback that shows a single channel in a new figure""" # make sure that the swipe gesture in OS-X doesn't open many figures orig_ax = event.inaxes if event.inaxes is None: return import matplotlib.pyplot as plt try: ch_idx = orig_ax._mne_ch_idx fig, ax = plt.subplots(1) plt.title(orig_ax._mne_ch_name) ax.set_axis_bgcolor('k') # allow custom function to override parameters show_func(plt, ch_idx) except Exception as err: # matplotlib silently ignores exceptions in event handlers, # so we print # it here to know what went wrong print(err) raise err
1,836
def topo_star(jd_tt, delta_t, star, position, accuracy=0): """ Computes the topocentric place of a star at 'date', given its catalog mean place, proper motion, parallax, and radial velocity. Parameters ---------- jd_tt : float TT Julian date for topocentric place. delta_t : float Difference TT-UT1 at 'date', in seconds of time. star : CatEntry Instance of CatEntry type object containing catalog data for the object in the ICRS. position : OnSurface Instance of OnSurface type object specifying the position of the observer. accuracy : {0, 1}, optional Code specifying the relative accuracy of the output position. = 0 ... full accuracy (default) = 1 ... reduced accuracy Returns ------- (ra, dec) : tuple of floats Topocentric (right ascension in hours, declination in degrees), referred to true equator and equinox of date 'jd_tt'. References ---------- .. [R1] Bangert, J. et. al. (2011), 'User's Guide to NOVAS Version C3.1', C62-C63. .. [R2] Explanatory Supplement to the Astronomical Almanac (1992), Chapter 3. """ if jd_tt < 0.0: raise ValueError(_neg_err.format(name='jd_tt')) if accuracy not in [0, 1]: raise ValueError(_option_err.format(name='accuracy', allowed=[0, 1])) _topo_star = novaslib.topo_star _topo_star.argtypes = (ctypes.c_double, ctypes.c_double, ctypes.POINTER(CatEntry), ctypes.POINTER(OnSurface), ctypes.c_short, ctypes.POINTER(ctypes.c_double), ctypes.POINTER(ctypes.c_double)) _topo_star.restype = ctypes.c_short _topo_star.errcheck = _check_c_errors _topo_star.c_errors = { 1: (ValueError, "from C function 'topo_star': Invalid value of 'where' in ctypes.Structure 'location'"), 11: (ValueError, "from C function 'make_object': invalid value of 'type'"), 12: (ValueError, "from C function 'make_object': 'number' out of range"), 13: (InitializationError, "from C function 'make_object': Initialization of 'cel_obj' failed (object name)."), 14: (InitializationError, "from C function 'make_object': Initialization of 'cel_obj' failed (catalog name)."), 15: (ValueError, "from C function 'make_object': 'name' is out of string bounds."), 21: (ValueError, "from C function 'place': invalid value of 'coord_sys'"), 22: (ValueError, "from C function 'place': invalid value of 'accuracy'"), 23: (ValueError, "from C function 'place': Earth is the observed object, and the observer is either at the geocenter or on the Earth's surface (not permitted)") } ra = ctypes.c_double() dec = ctypes.c_double() _topo_star(jd_tt, delta_t, ctypes.byref(star), ctypes.byref(position), accuracy, ctypes.byref(ra), ctypes.byref(dec)) return (ra.value, dec.value)
1,837
def py_multiplicative_inverse(a, n):
    """Multiplicative inverse of a modulo n (in Python).

    Implements extended Euclidean algorithm.

    Args:
        a: int-like np.ndarray.
        n: int.

    Returns:
        Multiplicative inverse as an int32 np.ndarray with same shape as a.
    """
    batched_a = np.asarray(a, dtype=np.int32)
    n = np.asarray(n, dtype=np.int32)
    batched_inverse = []
    for a in np.nditer(batched_a):
        inverse = 0
        new_inverse = 1
        remainder = n
        new_remainder = a
        while new_remainder != 0:
            quotient = remainder // new_remainder
            (inverse, new_inverse) = (new_inverse, inverse - quotient * new_inverse)
            (remainder, new_remainder) = (new_remainder,
                                          remainder - quotient * new_remainder)
        if remainder > 1:
            raise ValueError(
                'Inverse for {} modulo {} does not exist.'.format(a, n))
        if inverse < 0:
            inverse += n
        batched_inverse.append(inverse)
    return np.asarray(batched_inverse, dtype=np.int32).reshape(batched_a.shape)
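A usage sketch (assuming numpy is imported as np): the inverse of 3 modulo 7 is 5, since 3 * 5 = 15 = 1 (mod 7).

print(py_multiplicative_inverse(3, 7))                 # 5
print(py_multiplicative_inverse(np.array([3, 5]), 7))  # [5 3]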
1,838
def _get_simconffile(args):
    """
    Get experiment config file name from command line
    """
    logger = logging.getLogger('fms')
    try:
        simconffile = args[1]
    except IndexError:
        logger.critical("Missing simulation config file name.")
        sys.exit(2)
    return simconffile
1,839
def resample_nearest_neighbour(input_tif, extents, new_res, output_file): """ Nearest neighbor resampling and cropping of an image. :param str input_tif: input geotiff file path :param list extents: new extents for cropping :param float new_res: new resolution for resampling :param str output_file: output geotiff file path :return: dst: resampled image :rtype: ndarray """ dst, resampled_proj, src, _ = _crop_resample_setup(extents, input_tif, new_res, output_file) # Do the work gdal.ReprojectImage(src, dst, '', resampled_proj, gdalconst.GRA_NearestNeighbour) return dst.ReadAsArray()
1,840
def test_al_validation_third_digit_corresponds_to_type_of_company():
    """Test if invalid when the third digit is different from 0, 3, 5, 7 or 8"""
    invalid_number = '172030964'
    assert al.start(invalid_number) == False
1,841
def AptInstall(vm): """Installs the Mellanox OpenFabrics driver on the VM.""" _Install(vm)
1,842
def harvester_api_info(request, name): """ This function returns the pretty rendered api help text of an harvester. """ harvester = get_object_or_404(Harvester, name=name) api = InitHarvester(harvester).get_harvester_api() response = api.api_infotext() content = response.data[harvester.name].replace('\n', '<br>') return HttpResponse(content, content_type='text/plain')
1,843
def init_db():
    """Open SQLite database, create facebook table, return connection."""
    db = sqlite3.connect('facebook.sql')
    cur = db.cursor()
    cur.execute(SQL_CREATE)
    db.commit()
    cur.execute(SQL_CHECK)
    parse = list(cur.fetchall())[0][0] == 0
    return db, cur, parse
1,844
def prime_gen():
    """Returns a prime based on a 172 bit range. Result is 44 chars long."""
    x = subprocess.run(
        ['openssl', 'prime', '-generate', '-bits', '172', '-hex'],
        stdout=subprocess.PIPE)
    return x.stdout[:-1]
1,845
def aggregate_gradients_using_copy_with_variable_colocation( tower_grads, use_mean, check_inf_nan): """Aggregate gradients, colocating computation with the gradient's variable. Args: tower_grads: List of lists of (gradient, variable) tuples. The outer list is over towers. The inner list is over individual gradients. All variables of the same gradient across towers must be the same (that is, tower_grads[x][a][1] == tower_grads[y][a][1] for all indices x, y, and a) use_mean: if True, mean is taken, else sum of gradients is taken. check_inf_nan: If true, check grads for nans and infs. Returns: The tuple ([(average_gradient, variable),], has_nan_or_inf) where the gradient has been averaged across all towers. The variable is chosen from the first tower. The has_nan_or_inf indicates the grads has nan or inf. """ agg_grads = [] has_nan_or_inf_list = [] for single_grads in zip(*tower_grads): # Note that each single_grads looks like the following: # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN)) var = single_grads[0][1] for _, v in single_grads: assert v == var with tf.device(var.device): grad_and_var, has_nan_or_inf = aggregate_single_gradient_using_copy( single_grads, use_mean, check_inf_nan) agg_grads.append(grad_and_var) has_nan_or_inf_list.append(has_nan_or_inf) if check_inf_nan: return agg_grads, tf.reduce_any(has_nan_or_inf_list) else: return agg_grads, None
1,846
def module_for_category( category ): """Return the OpenGL.GL.x module for the given category name""" if category.startswith( 'VERSION_' ): name = 'OpenGL.GL' else: owner,name = category.split( '_',1) if owner.startswith( '3' ): owner = owner[1:] name = 'OpenGL.GL.%s.%s'%( owner,name ) return __import__( name, {}, {}, name.split( '.' ))
1,847
def check_file_location(file_path, function, file_ext='', exists=False): """Function to check whether a file exists and has the correct file extension""" folder, file, ext = '', '', '' if file_path == '': exit_prompt('Error: Could not parse path to {} file'.format(function)) try: file, ext = os.path.splitext(os.path.basename(file_path)) folder = os.path.dirname(file_path) except: exit_prompt('Error: Could not parse path to {} file'.format(function)) if file_ext != '' and ext != file_ext: exit_prompt('Error: The {} file should have the extension {}'.format(function, file_ext)) if exists and not os.path.isfile(os.path.join(folder, file + ext)): exit_prompt('Error: The specified {} file cannot be found'.format(function)) return folder, file, ext
1,848
def timestamp() -> str:
    """generate formatted timestamp for the invocation moment"""
    return dt.now().strftime("%d-%m-%Y %H:%M:%S")
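A usage sketch, assuming dt is the datetime.datetime class imported under that alias (as the snippet implies):

from datetime import datetime as dt

print(timestamp())  # e.g. "25-12-2023 14:30:05"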
1,849
def test_serder(): """ Test the support functionality for Serder key event serialization deserialization """ with pytest.raises(ValueError): serder = Serder() e1 = dict(vs=Vstrings.json, pre="ABCDEFG", sn="0001", ilk="rot") serder = Serder(ked=e1) assert serder.ked == e1 assert serder.kind == Serials.json assert serder.version == Versionage(major=1, minor=0) assert serder.dig == 'EaDVEkrFdx8W0ZZAsfwf9mjxhgBt6PvfCmFPdr7RIcfY' assert serder.digb == b'EaDVEkrFdx8W0ZZAsfwf9mjxhgBt6PvfCmFPdr7RIcfY' assert serder.size == 66 assert serder.verfers == [] assert serder.raw == b'{"vs":"KERI10JSON000042_","pre":"ABCDEFG","sn":"0001","ilk":"rot"}' e1s = json.dumps(e1, separators=(",", ":"), ensure_ascii=False).encode("utf-8") assert e1s == b'{"vs":"KERI10JSON000042_","pre":"ABCDEFG","sn":"0001","ilk":"rot"}' vs = Versify(kind=Serials.json, size=len(e1s)) # use real length assert vs == 'KERI10JSON000042_' e1["vs"] = vs # has real length e1s = json.dumps(e1, separators=(",", ":"), ensure_ascii=False).encode("utf-8") with pytest.raises(ShortageError): # test too short kind1, vers1, size1 = serder._sniff(e1s[:VERFULLSIZE]) kind1, vers1, size1 = serder._sniff(e1s[:MINSNIFFSIZE]) assert kind1 == Serials.json assert size1 == 66 kind1, vers1, size1 = serder._sniff(e1s) assert kind1 == Serials.json assert size1 == 66 e1ss = e1s + b'extra attached at the end.' ked1, knd1, vrs1, siz1 = serder._inhale(e1ss) assert ked1 == e1 assert knd1 == kind1 assert vrs1 == vers1 assert siz1 == size1 with pytest.raises(ShortageError): # test too short ked1, knd1, vrs1, siz1 = serder._inhale(e1ss[:size1-1]) raw1, knd1, ked1, ver1 = serder._exhale(ked=e1) assert raw1 == e1s assert knd1 == kind1 assert ked1 == e1 assert vrs1 == vers1 e2 = dict(e1) e2["vs"] = Vstrings.mgpk e2s = msgpack.dumps(e2) assert e2s == b'\x84\xa2vs\xb1KERI10MGPK000000_\xa3pre\xa7ABCDEFG\xa2sn\xa40001\xa3ilk\xa3rot' vs = Versify(kind=Serials.mgpk, size=len(e2s)) # use real length assert vs == 'KERI10MGPK000032_' e2["vs"] = vs # has real length e2s = msgpack.dumps(e2) with pytest.raises(ShortageError): # test too short kind2, vers2, size2 = serder._sniff(e2s[:VERFULLSIZE]) kind2, vers2, size2 = serder._sniff(e2s[:MINSNIFFSIZE]) assert kind2 == Serials.mgpk assert size2 == 50 kind2, vers2, size2 = serder._sniff(e2s) assert kind2 == Serials.mgpk assert size2 == 50 e2ss = e2s + b'extra attached at the end.' ked2, knd2, vrs2, siz2 = serder._inhale(e2ss) assert ked2 == e2 assert knd2 == kind2 assert vrs2 == vers2 assert siz2 == size2 with pytest.raises(ShortageError): # test too short ked2, knd2, vrs2, siz2 = serder._inhale(e2ss[:size2-1]) raw2, knd2, ked2, ver2 = serder._exhale(ked=e2) assert raw2 == e2s assert knd2 == kind2 assert ked2 == e2 assert vrs2 == vers2 e3 = dict(e1) e3["vs"] = Vstrings.cbor e3s = cbor.dumps(e3) assert e3s == b'\xa4bvsqKERI10CBOR000000_cpregABCDEFGbsnd0001cilkcrot' vs = Versify(kind=Serials.cbor, size=len(e3s)) # use real length assert vs == 'KERI10CBOR000032_' e3["vs"] = vs # has real length e3s = cbor.dumps(e3) with pytest.raises(ShortageError): # test too short kind3, vers3, size3 = serder._sniff(e3s[:VERFULLSIZE]) kind3, vers3, size3 = serder._sniff(e3s[:MINSNIFFSIZE]) assert kind3 == Serials.cbor assert size3 == 50 kind3, vers3, size3 = serder._sniff(e3s) assert kind3 == Serials.cbor assert size3 == 50 e3ss = e3s + b'extra attached at the end.' 
ked3, knd3, vrs3, siz3 = serder._inhale(e3ss) assert ked3 == e3 assert knd3 == kind3 assert vrs3 == vers3 assert siz3 == size3 with pytest.raises(ShortageError): # test too short ked3, knd3, vrs3, siz3 = serder._inhale(e3ss[:size3-1]) raw3, knd3, ked3, ver3 = serder._exhale(ked=e3) assert raw3 == e3s assert knd3 == kind3 assert ked3 == e3 assert vrs3 == vers3 evt1 = Serder(raw=e1ss) assert evt1.kind == kind1 assert evt1.raw == e1s assert evt1.ked == ked1 assert evt1.size == size1 assert evt1.raw == e1ss[:size1] assert evt1.version == vers1 # test digest properties .diger and .dig assert evt1.diger.qb64 == evt1.dig assert evt1.diger.code == CryOneDex.Blake3_256 assert len(evt1.diger.raw) == 32 assert len(evt1.dig) == 44 assert len(evt1.dig) == CryOneSizes[CryOneDex.Blake3_256] assert evt1.dig == 'EaDVEkrFdx8W0ZZAsfwf9mjxhgBt6PvfCmFPdr7RIcfY' assert evt1.diger.verify(evt1.raw) evt1 = Serder(ked=ked1) assert evt1.kind == kind1 assert evt1.raw == e1s assert evt1.ked == ked1 assert evt1.size == size1 assert evt1.raw == e1ss[:size1] assert evt1.version == vers1 evt2 = Serder(raw=e2ss) assert evt2.kind == kind2 assert evt2.raw == e2s assert evt2.ked == ked2 assert evt2.version == vers2 evt2 = Serder(ked=ked2) assert evt2.kind == kind2 assert evt2.raw == e2s assert evt2.ked == ked2 assert evt2.size == size2 assert evt2.raw == e2ss[:size2] assert evt2.version == vers2 evt3 = Serder(raw=e3ss) assert evt3.kind == kind3 assert evt3.raw == e3s assert evt3.ked == ked3 assert evt3.version == vers3 evt3 = Serder(ked=ked3) assert evt3.kind == kind3 assert evt3.raw == e3s assert evt3.ked == ked3 assert evt3.size == size3 assert evt3.raw == e3ss[:size3] assert evt3.version == vers3 # round trip evt2 = Serder(ked=evt1.ked) assert evt2.kind == evt1.kind assert evt2.raw == evt1.raw assert evt2.ked == evt1.ked assert evt2.size == evt1.size assert evt2.version == vers2 # Test change in kind by Serder evt1 = Serder(ked=ked1, kind=Serials.mgpk) # ked is json but kind mgpk assert evt1.kind == kind2 assert evt1.raw == e2s assert evt1.ked == ked2 assert evt1.size == size2 assert evt1.raw == e2ss[:size2] assert evt1.version == vers1 # round trip evt2 = Serder(raw=evt1.raw) assert evt2.kind == evt1.kind assert evt2.raw == evt1.raw assert evt2.ked == evt1.ked assert evt2.size == evt1.size assert evt2.version == vers2 evt1 = Serder(ked=ked1, kind=Serials.cbor) # ked is json but kind mgpk assert evt1.kind == kind3 assert evt1.raw == e3s assert evt1.ked == ked3 assert evt1.size == size3 assert evt1.raw == e3ss[:size3] assert evt1.version == vers1 # round trip evt2 = Serder(raw=evt1.raw) assert evt2.kind == evt1.kind assert evt2.raw == evt1.raw assert evt2.ked == evt1.ked assert evt2.size == evt1.size assert evt2.version == vers2 # use kind setter property assert evt2.kind == Serials.cbor evt2.kind = Serials.json assert evt2.kind == Serials.json knd, version, size = Deversify(evt2.ked['vs']) assert knd == Serials.json """Done Test """
1,850
def sde(trains, events=None, start=0 * pq.ms, stop=None, kernel_size=100 * pq.ms, optimize_steps=0, minimum_kernel=10 * pq.ms, maximum_kernel=500 * pq.ms, kernel=None, time_unit=pq.ms, progress=None): """ Create a spike density estimation plot. The spike density estimations give an estimate of the instantaneous rate. Optionally finds optimal kernel size for given data. :param dict trains: A dictionary of :class:`neo.core.SpikeTrain` lists. :param dict events: A dictionary (with the same indices as ``trains``) of Event objects or lists of Event objects. In case of lists, the first event in the list will be used for alignment. The events will be at time 0 on the plot. If None, spike trains are used unmodified. :param start: The desired time for the start of the first bin. It will be recalculated if there are spike trains which start later than this time. This parameter can be negative (which could be useful when aligning on events). :type start: Quantity scalar :param stop: The desired time for the end of the last bin. It will be recalculated if there are spike trains which end earlier than this time. :type stop: Quantity scalar :param kernel_size: A uniform kernel size for all spike trains. Only used if optimization of kernel sizes is not used (i.e. ``optimize_steps`` is 0). :type kernel_size: Quantity scalar :param int optimize_steps: The number of different kernel sizes tried between ``minimum_kernel`` and ``maximum_kernel``. If 0, ``kernel_size`` will be used. :param minimum_kernel: The minimum kernel size to try in optimization. :type minimum_kernel: Quantity scalar :param maximum_kernel: The maximum kernel size to try in optimization. :type maximum_kernel: Quantity scalar :param kernel: The kernel function or instance to use, should accept two parameters: A ndarray of distances and a kernel size. The total area under the kernel function should be 1. Automatic optimization assumes a Gaussian kernel and will likely not produce optimal results for different kernels. Default: Gaussian kernel :type kernel: func or :class:`spykeutils.signal_processing.Kernel` :param Quantity time_unit: Unit of X-Axis. :param progress: Set this parameter to report progress. 
:type progress: :class:`spykeutils.progress_indicator.ProgressIndicator` """ if not progress: progress = ProgressIndicator() start.units = time_unit if stop: stop.units = time_unit kernel_size.units = time_unit minimum_kernel.units = time_unit maximum_kernel.units = time_unit if kernel is None: kernel = signal_processing.GaussianKernel(100 * pq.ms) # Align spike trains for u in trains: if events: trains[u] = rate_estimation.aligned_spike_trains( trains[u], events) # Calculate spike density estimation if optimize_steps: steps = sp.logspace(sp.log10(minimum_kernel), sp.log10(maximum_kernel), optimize_steps) * time_unit sde, kernel_size, eval_points = \ rate_estimation.spike_density_estimation( trains, start, stop, optimize_steps=steps, kernel=kernel, progress=progress) else: sde, kernel_size, eval_points = \ rate_estimation.spike_density_estimation( trains, start, stop, kernel_size=kernel_size, kernel=kernel, progress=progress) progress.done() if not sde: raise SpykeException('No spike trains for SDE!') # Plot win_title = 'Kernel Density Estimation' win = PlotDialog(toolbar=True, wintitle=win_title) pW = BaseCurveWidget(win) plot = pW.plot plot.set_antialiasing(True) for u in trains: if u and u.name: name = u.name else: name = 'Unknown' curve = make.curve( eval_points, sde[u], title='%s, Kernel width %.2f %s' % (name, kernel_size[u], time_unit.dimensionality.string), color=helper.get_object_color(u)) plot.add_item(curve) plot.set_axis_title(BasePlot.X_BOTTOM, 'Time') plot.set_axis_unit(BasePlot.X_BOTTOM, eval_points.dimensionality.string) plot.set_axis_title(BasePlot.Y_LEFT, 'Rate') plot.set_axis_unit(BasePlot.Y_LEFT, 'Hz') l = make.legend() plot.add_item(l) win.add_plot_widget(pW, 0) win.add_custom_curve_tools() win.add_legend_option([l], True) win.show() return win
1,851
def content(obj): """Strip HTML tags for list display.""" return strip_tags(obj.content.replace('</', ' </'))
1,852
def main(): """ main() """ action = get_cmd_param(1) if action == 'soup': keyword = get_cmd_param( 2, 'https://tw.news.appledaily.com/local/realtime/20181025/1453825') soup(keyword) elif action == 'search': keyword = get_cmd_param(2, '酒駕') channel = get_cmd_param(3, 'appledaily') search_and_list(keyword, channel) elif action == 'snsp': keyword = get_cmd_param(2, '酒駕') channel = get_cmd_param(3, 'appledaily') search_and_soup(keyword, channel) elif action == 'sncp': keyword = get_cmd_param(2, '酒駕') search_and_compare_performance(keyword) elif action == 'cpkw': keyword = get_cmd_param(2, '酒駕') compare_keyword(keyword) else: if action != 'help': print('動作名稱錯誤') print() usage()
1,853
def flux(Q, N, ne, Ap, Am):
    """
    Calculates the flux between two boundary sides of
    connected elements for element i
    """
    # for every element we have 2 faces to other elements (left and right)
    out = np.zeros((ne, N + 1, 2))

    # Calculate fluxes inside the domain
    for i in range(1, ne - 1):
        out[i, 0, :] = Ap @ (-Q[i - 1, N, :]) + Am @ (-Q[i, 0, :])
        out[i, N, :] = Ap @ (Q[i, N, :]) + Am @ (Q[i + 1, 0, :])

    # Boundaries
    # Left
    out[0, 0, :] = Ap @ np.array([0, 0]) + Am @ (-Q[0, 0, :])
    out[0, N, :] = Ap @ (Q[0, N, :]) + Am @ (Q[1, 0, :])
    # Right
    out[ne - 1, 0, :] = Ap @ (-Q[ne - 2, N, :]) + Am @ (-Q[ne - 1, 0, :])
    out[ne - 1, N, :] = Ap @ (Q[ne - 1, N, :]) + Am @ np.array([0, 0])

    return out
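A shape-level usage sketch with hypothetical characteristic matrices (assuming numpy is imported as np): three elements, three nodes per element, two characteristic variables.

Ap = np.array([[1.0, 0.0], [0.0, 0.0]])  # right-going characteristics
Am = np.array([[0.0, 0.0], [0.0, 1.0]])  # left-going characteristics
Q = np.random.rand(3, 3, 2)              # (ne, N + 1, 2)
F = flux(Q, 2, 3, Ap, Am)
print(F.shape)  # (3, 3, 2)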
1,854
def cp_dir(src_dir, dest_dir): """Function: cp_dir Description: Copies a directory from source to destination. Arguments: (input) src_dir -> Source directory. (input) dest_dir -> Destination directory. (output) status -> True|False - True if copy was successful. (output) err_msg -> Error message from copytree exception or None. """ status = True err_msg = None try: shutil.copytree(src_dir, dest_dir) # Directory permission error. except shutil.Error as err: err_msg = "Directory not copied. Perms Error Message: %s" % (err) status = False # Directory does not exist. except OSError as err: err_msg = "Directory not copied. Exist Error Message: %s" % (err) status = False return status, err_msg
1,855
def listnet_loss(y_i, z_i):
    """
    y_i: (n_i, 1)
    z_i: (n_i, 1)
    """
    P_y_i = F.softmax(y_i, dim=0)
    P_z_i = F.softmax(z_i, dim=0)
    return - torch.sum(y_i * torch.log(P_z_i))
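A usage sketch assuming the imports used by the snippet (torch, and torch.nn.functional as F): scores for a three-document list.

import torch
import torch.nn.functional as F

y = torch.tensor([[3.0], [1.0], [0.5]])  # ground-truth relevance scores
z = torch.tensor([[2.5], [0.8], [0.2]])  # predicted scores
print(listnet_loss(y, z).item())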
1,856
def get_volume_parameters(volumes): """Create pipeline parameters for volumes to be mounted on pipeline steps. Args: volumes: a volume spec Returns (dict): volume pipeline parameters """ volume_parameters = dict() for v in volumes: if v['type'] == 'pv': # FIXME: How should we handle existing PVs? continue if v['type'] == 'pvc': mount_point = v['mount_point'].replace('/', '_').strip('_') par_name = "vol_{}".format(mount_point) volume_parameters[par_name] = ('str', v['name']) elif v['type'] == 'new_pvc': rok_url = v['annotations'].get("rok/origin") if rok_url is not None: par_name = "rok_{}_url".format(v['name'].replace('-', '_')) volume_parameters[par_name] = ('str', rok_url) else: raise ValueError("Unknown volume type: {}".format(v['type'])) return volume_parameters
1,857
def _process_active_tools(toolbar, tool_map, active_drag, active_inspect, active_scroll, active_tap): """ Adds tools to the plot object Args: toolbar (Toolbar): instance of a Toolbar object tools_map (dict[str]|Tool): tool_map from _process_tools_arg active_drag (str or Tool): the tool to set active for drag active_inspect (str or Tool): the tool to set active for inspect active_scroll (str or Tool): the tool to set active for scroll active_tap (str or Tool): the tool to set active for tap Returns: None Note: This function sets properties on Toolbar """ if active_drag in ['auto', None] or isinstance(active_drag, Tool): toolbar.active_drag = active_drag elif active_drag in tool_map: toolbar.active_drag = tool_map[active_drag] else: raise ValueError("Got unknown %r for 'active_drag', which was not a string supplied in 'tools' argument" % active_drag) if active_inspect in ['auto', None] or isinstance(active_inspect, Tool) or all([isinstance(t, Tool) for t in active_inspect]): toolbar.active_inspect = active_inspect elif active_inspect in tool_map: toolbar.active_inspect = tool_map[active_inspect] else: raise ValueError("Got unknown %r for 'active_inspect', which was not a string supplied in 'tools' argument" % active_scroll) if active_scroll in ['auto', None] or isinstance(active_scroll, Tool): toolbar.active_scroll = active_scroll elif active_scroll in tool_map: toolbar.active_scroll = tool_map[active_scroll] else: raise ValueError("Got unknown %r for 'active_scroll', which was not a string supplied in 'tools' argument" % active_scroll) if active_tap in ['auto', None] or isinstance(active_tap, Tool): toolbar.active_tap = active_tap elif active_tap in tool_map: toolbar.active_tap = tool_map[active_tap] else: raise ValueError("Got unknown %r for 'active_tap', which was not a string supplied in 'tools' argument" % active_tap)
1,858
def normalize(data, **kw): """Calculates the normalization of the given array. The normalizated array is returned as a different array. Args: data The data to be normalized Kwargs: upper_bound The upper bound of the normalization. It has the value of 1 by default. lower_bound The lower bound to be used for normalization. It has the value of 0 by default dtype The type of the returned ndarray. If the dtype given is an integer type the returned array values will be truncated after normalized. Returns: An instance of np.array with normalizated values """ upper_bound = 1 lower_bound = 0 dtype = np.float64 if 'upper_bound' in kw: upper_bound = kw['upper_bound'] if 'lower_bound' in kw: lower_bound = kw['lower_bound'] if 'dtype' in kw: dtype = kw['dtype'] check_ndarray(data) newdata = data - data.min() newdata = newdata / newdata.max() newdata = newdata * (upper_bound - lower_bound) newdata += lower_bound return newdata.astype(dtype)
1,859
def relative_sse(cp_tensor, X, sum_squared_X=None):
    """Compute the relative sum of squared error for a given cp_tensor.

    Parameters
    ----------
    cp_tensor : CPTensor or tuple
        TensorLy-style CPTensor object or tuple with weights as first
        argument and a tuple of components as second argument
    X : ndarray
        Tensor approximated by ``cp_tensor``
    sum_squared_X : float (optional)
        If ``sum(X**2)`` is already computed, you can optionally provide it
        using this argument to avoid unnecessary recalculation.

    Returns
    -------
    float
        The relative sum of squared error, ``sum((X_hat - X)**2)/sum(X**2)``,
        where ``X_hat`` is the dense tensor represented by ``cp_tensor``

    Examples
    --------
    Below, we create a random CP tensor and a random tensor and compute
    the sum of squared error for these two tensors.

    >>> import tensorly as tl
    >>> from tensorly.random import random_cp
    >>> from component_vis.model_evaluation import relative_sse
    >>> rng = tl.check_random_state(0)
    >>> cp = random_cp((4, 5, 6), 3, random_state=rng)
    >>> X = rng.random_sample((4, 5, 6))
    >>> relative_sse(cp, X)
    0.4817407254961442
    """
    # TODO: tests for relative_sse
    if sum_squared_X is None:
        sum_squared_X = np.sum(X ** 2)
    return sse(cp_tensor, X) / sum_squared_X
1,860
def setup_ceilometer_compute(): """Provisions ceilometer compute services in all nodes defined in compute role.""" if env.roledefs['compute']: execute("setup_ceilometer_compute_node", env.host_string)
1,861
def set_elixier_item_status(item, status):
    """
    .. Changes the status of the Elixier item
    """
    #id = item.id_local
    item.status = status
    item.save()
1,862
def test_enamble_stat_datatype(data):
    """
    Make sure that the wrong data type will raise an exception.
    """
    with pytest.raises(ValueError):
        _ = ensemble_stat(data.values)
1,863
def cc_across_time(tfx, tfy, cc_func, cc_args=()):
    """Cross correlations across time.

    Args:
        tfx : time-frequency domain signal 1
        tfy : time-frequency domain signal 2
        cc_func : cross correlation function.
        cc_args : list of extra arguments of cc_func.

    Returns:
        cc_atime : cross correlation at different time.

    Note:
        If tfx and tfy are not of the same length, the result will be
        truncated to the shorter one.
    """
    return np.array([cc_func(x, y, *cc_args) for x, y in zip(tfx, tfy)])
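A usage sketch (assuming numpy is imported as np), with np.correlate standing in as the cross-correlation function:

tfx = np.random.randn(5, 64)  # 5 time frames, 64 frequency bins
tfy = np.random.randn(5, 64)
cc = cc_across_time(tfx, tfy, np.correlate, ("full",))
print(cc.shape)  # (5, 127)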
1,864
def transcribe(args, client, site_id): """Transcribe one or more WAV files using hermes/asr""" from .asr import AsrStartListening, AsrStopListening, AsrTextCaptured from .audioserver import AudioFrame client.subscribe(AsrTextCaptured.topic()) frame_topic = AudioFrame.topic(site_id=site_id) if args.wav_file: # Read WAV paths from command-line arguments def get_wavs(): for wav_path in args.wav_file: wav_path = Path(wav_path) with open(wav_path, "rb") as wav_file: yield str(wav_path), wav_file elif args.stdin_files: # Read WAV paths from stdin (one per line) def get_wavs(): for wav_path in sys.stdin: wav_path = Path(wav_path.strip()) with open(wav_path, "rb") as wav_file: yield str(wav_path), wav_file else: # Read WAV data from stdin def get_wavs(): if os.isatty(sys.stdin.fileno()): print("Reading WAV data from stdin...", file=sys.stderr) wav_bytes = sys.stdin.buffer.read() with io.BytesIO(wav_bytes) as wav_file: yield "<stdin>", wav_file for wav_name, wav_file in get_wavs(): _LOGGER.debug("Transcribing %s", wav_name) done_event = threading.Event() result_topic = "" text_captured = None session_id = str(uuid4()) def on_message(client, userdata, msg): nonlocal result_topic, text_captured try: if msg.topic == AsrTextCaptured.topic(): # Verify site_id/session_id json_payload = json.loads(msg.payload) if check_site_id(args, json_payload) and ( json_payload.get("session_id", "") == session_id ): # Matched result_topic = msg.topic text_captured = AsrTextCaptured(**json_payload) done_event.set() except Exception: _LOGGER.exception("transcribe.on_message") client.on_message = on_message with wav_file: # startListening publish(client, AsrStartListening(site_id=site_id, session_id=session_id)) # Send WAV chunks (audioFrame) for wav_chunk in AudioFrame.iter_wav_chunked( wav_file, frames_per_chunk=2048 ): client.publish(frame_topic, wav_chunk) # stopListening publish(client, AsrStopListening(site_id=site_id, session_id=session_id)) _LOGGER.debug( "Waiting for textCaptured (%s, session_id=%s)", wav_name, session_id ) # Wait for textCaptured done_event.wait() # Print result assert text_captured is not None print_json(args, result_topic, text_captured)
1,865
def predict_encoding(file_path, n_lines=20):
    """Predict a file's encoding using chardet"""
    import chardet

    # Open the file as binary data
    with open(file_path, "rb") as f:
        # Join binary lines for specified number of lines
        rawdata = b"".join([f.readline() for _ in range(n_lines)])

    return chardet.detect(rawdata)["encoding"]
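A usage sketch with a hypothetical file written on the fly:

with open("sample.txt", "w", encoding="utf-8") as f:
    f.write("héllo wörld\n" * 5)
print(predict_encoding("sample.txt"))  # e.g. 'utf-8'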
1,866
def redirect_handler(url, client_id, client_secret, redirect_uri, scope): """ Convenience redirect handler. Provide the redirect url (containing auth code) along with client credentials. Returns a spotify access token. """ auth = ExtendedOAuth( client_id, client_secret, redirect_uri, scope=scope) code = auth.parse_response_code(url) token = auth.get_access_token(code) return token
1,867
def convert_coordinate(coordinate):
    """
    :param coordinate: str - a string map coordinate
    :return: tuple - the string coordinate separated into its individual components.
    """
    coord = (coordinate[0], coordinate[1])
    return coord
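A quick usage sketch:

print(convert_coordinate("A1"))  # ('A', '1')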
1,868
def tree_falls(geo_index, shps, CHMs,savedir="."): """Find predictions that don't match accross years, where there is significant height drop among years geo_index: NEON geoindex to process shps: List of shapefiles to search CHMs: List of canopy height models to search """ #Find matching shapefiles matched_shps = [x for x in shps if geo_index in x] #Load shapefiles shapefiles = {} for shp in matched_shps: #Load data and give it site and year and tile labels df = geopandas.read_file(shp) geo_index = re.search("(\d+_\d+)_image",shp).group(1) df["shp_path"] = shp df["geo_index"] = geo_index df["Year"] = re.search("(\d+)_(\w+)_\d_\d+_\d+_image.shp",shp).group(1) df["Site"] = re.search("(\d+)_(\w+)_\d_\d+_\d+_image.shp",shp).group(2) shapefiles[df["Year"].unique()[0]] = df #Difference in counts mean_difference_among_years = difference_in_count(shapefiles) #Join to find predictions that don't match joined_boxes = sjoin(shapefiles["2018"],shapefiles["2019"]) no_matches = shapefiles["2018"][~(shapefiles["2018"].index.isin(joined_boxes.index))] #For each tree that does not match, check the 2019 height CHM = lookup_CHM_path(shapefiles["2018"]["shp_path"].unique()[0], CHMs) if not os.path.exists(CHM): raise IOError("{} does not exist".format(CHM)) draped_2019 = rasterstats.zonal_stats(no_matches, CHM, stats="mean") no_matches["2019_height"] = [x["mean"] for x in draped_2019] #Keep predictions whose mean height dropped by more than 50% no_matches["height_frac"] = (no_matches["2019_height"] - no_matches["height"]) / no_matches["height"] fall_df = no_matches[no_matches["height_frac"] < -0.5] #Keep predictions whose original height was greater than 5m #fall_df = fall_df[fall_df.height > 5] #Write tree fall shapefile fname = os.path.basename(shapefiles["2019"]["shp_path"].unique()[0]) fname = os.path.splitext(fname)[0] fname = "{}/{}_treefall.shp".format(savedir,fname) fall_df.to_file(fname) #get predictions whose height did not drop by more than 50%, indiciating poor matching non_fall_df = no_matches[~(no_matches["height_frac"] < -0.5)] #Keep predictions whose original height was greater than 5m #fall_df = fall_df[fall_df.height > 5] #Write tree fall shapefile fname = os.path.basename(shapefiles["2019"]["shp_path"].unique()[0]) fname = os.path.splitext(fname)[0] fname = "{}/{}_incorrect_treefall.shp".format(savedir,fname) non_fall_df.to_file(fname) #Stablity metrics #Proportion not matched compared to the earliest year p_without_match = non_fall_df.shape[0]/shapefiles["2018"].shape[0] metrics = pd.DataFrame({"Mean_Count_Difference":mean_difference_among_years,"p_without_match":p_without_match}) fname = os.path.basename(shapefiles["2019"]["shp_path"].unique()[0]) fname = os.path.splitext(fname)[0] fname = "{}/{}_metrics.csv".format(savedir,fname) metrics.to_csv(fname) return fname
1,869
def split_multi_fasta_into_fasta(fasta_file, output_directory):
    """
    Splits a single multi-FASTA-format file into individual FASTA-format files, each containing
    only one FASTA record.

    PARAMETERS
        fasta_file (str): the file location of the FASTA file
        output_directory (str): the output directory to place all of the individual FASTA files

    RETURNS
        file_list (list(str)): a list of the locations of the written FASTA files in descending
            order of sequence length

    POST
        The output directory will contain a number of FASTA files equal to the number of FASTA
        records in the multi-FASTA-format file provided to this function.
    """

    count = 0
    file_list = []
    output = None

    if not os.path.exists(fasta_file):
        raise FileNotFoundError("File not found: " + fasta_file)

    if not os.path.isdir(output_directory):
        os.mkdir(output_directory)

    with open(fasta_file) as file:

        for line in file:

            # FASTA record header
            if line.startswith(">"):
                # Close the previous record's file before starting a new one
                if output is not None:
                    output.close()

                output_file = os.path.join(output_directory, str(count) + ".fasta")
                output = open(output_file, "w")
                count += 1

                information = [output_file, 0]  # [filename, number of sequence characters]
                file_list.append(information)

                output.write(line)

            # FASTA record sequence
            else:
                output.write(line)
                file_list[len(file_list) - 1][1] += len(line)  # last item in list, increment sequence characters

    if output is not None:
        output.close()

    file_list.sort(key=lambda filename: filename[1], reverse=True)

    # Keep only filenames (not number of characters)
    for i in range(len(file_list)):
        file_list[i] = file_list[i][0]

    return file_list
1,870
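A small end-to-end sketch of the splitter above, assuming the function is importable; the two-record FASTA content and directory names are invented for illustration.

import os
import tempfile

# Build a tiny two-record multi-FASTA file (invented example data).
workdir = tempfile.mkdtemp()
fasta_path = os.path.join(workdir, "example.fasta")
with open(fasta_path, "w") as f:
    f.write(">record_1\nACGTACGTACGT\n>record_2\nACGT\n")

# Split it; files come back sorted by sequence length, longest first.
out_dir = os.path.join(workdir, "split")
written = split_multi_fasta_into_fasta(fasta_path, out_dir)
print(written)  # e.g. ['.../split/0.fasta', '.../split/1.fasta']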
def get_noun_phrases(doc: Doc) -> List[Span]: """Compile a list of noun phrases in sense2vec's format (without determiners). Separated out to make it easier to customize, e.g. for languages that don't implement a noun_chunks iterator out-of-the-box, or use different label schemes. doc (Doc): The Doc to get noun phrases from. RETURNS (list): The noun phrases as a list of Span objects. """ trim_labels = ("advmod", "amod", "compound") spans = [] if doc.is_parsed: for np in doc.noun_chunks: while len(np) > 1 and np[0].dep_ not in trim_labels: np = np[1:] spans.append(np) return spans
1,871
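A usage sketch for get_noun_phrases, assuming a spaCy version where Doc.is_parsed is still available (v2.x) and that the small English model has already been downloaded; the sentence and expected output are illustrative only.

import spacy

# Assumes: python -m spacy download en_core_web_sm
nlp = spacy.load("en_core_web_sm")
doc = nlp("The quick brown fox jumps over the extremely lazy dog.")

# Leading determiners are stripped; adverbial/adjectival/compound modifiers are kept.
print([span.text for span in get_noun_phrases(doc)])
# e.g. ['quick brown fox', 'extremely lazy dog']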
def load_file_from_url(url): """Load the data from url.""" url_path = get_absolute_url_path(url, PATH) response = urlopen(url_path) contents = json.loads(response.read()) return parse_file_contents(contents, url_path.endswith(".mrsys"))
1,872
def speedPunisherMin(v, vmin):
    """
    Penalty for dropping below a minimum speed.

    :param v: speed value (scalar or array)
    :param vmin: minimum allowed speed
    :return: squared shortfall below vmin (zero when v >= vmin)
    """
    x = fmin(v - vmin, 0)
    return x ** 2
1,873
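A quick numeric check of the penalty above, assuming fmin is NumPy's elementwise np.fmin (the import is not shown in the snippet).

import numpy as np
from numpy import fmin  # assumed source of fmin used by the snippet above

v = np.array([3.0, 5.0, 7.0])
vmin = 5.0

# Shortfalls are min(v - vmin, 0) = [-2, 0, 0]; squaring gives [4, 0, 0],
# so only speeds below vmin are penalised.
print(speedPunisherMin(v, vmin))  # [4. 0. 0.]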
def initialize_ecr_client():
    """
    Initializes the ECR client.

    If running in Lambda mode, only the AWS_REGION environment variable is needed.
    If not running in Lambda mode, the AWS credentials are also needed.
    """

    if os.environ.get('MODE') == 'lambda':
        ecrClient = boto3.client('ecr', region_name = os.environ.get('AWS_REGION'))
    else:
        ecrClient = boto3.client('ecr',
            aws_access_key_id = os.environ.get('AWS_ACCESS_KEY_ID'),
            aws_secret_access_key = os.environ.get('AWS_SECRET_ACCESS_KEY'),
            region_name = os.environ.get('AWS_REGION')
        )

    return ecrClient
1,874
def hexagonal_packing_cross_section(nseeds, Areq, insu, out_insu): """ Make a hexagonal packing and scale the result to be Areq cross section Parameter insu must be a percentage of the strand radius. out_insu is the insulation thickness around the wire as meters Returns: (wire diameter, strand diameter, strand center points) """ seeds = np.linspace(-0.5, 0.5,nseeds) dx = seeds[1]-seeds[0] xs, ys = np.meshgrid(seeds, seeds) if (nseeds-1) % 4 == 0: ys[:,1::2] = ys[:,1::2] + 0.5*dx; else: ys[:,0::2] = ys[:,0::2] + 0.5*dx; ys = ys*2/np.sqrt(3); points = np.stack([xs.reshape(-1), ys.reshape(-1)], axis=1) vor = Voronoi(points) hexs = [v for v in vor.regions if len(v) == 6] all_cells = vor.vertices[hexs, :] max_dists = np.max(np.linalg.norm(all_cells, axis=2), axis=1) cells = all_cells[max_dists < 0.5, :] strand_cps = np.mean(cells, axis=1) # if strand bundle is not symmetric, it will be off center so... # move it back to center strand_cps = strand_cps - np.mean(strand_cps, axis=0) # quite a silly way to calculate the strand diameter.but it indeed is # the minimum of the distances from the first cell center to all the rest # minus the insulation thickness strand_diam = np.min(np.linalg.norm(strand_cps[1:]-strand_cps[0], axis=1))*(1-insu) nstrands = len(strand_cps) Acu = nstrands*(strand_diam/2)**2*np.pi scale = np.sqrt(Areq/Acu) strand_cps_scaled = scale*strand_cps strand_diam_scaled = scale*strand_diam wire_diameter = (np.max(np.linalg.norm(strand_cps_scaled, axis=1), axis=0)*2 + strand_diam_scaled*(1+insu)/(1-insu) + out_insu) return wire_diameter, strand_diam_scaled, strand_cps_scaled
1,875
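An example call for the hexagonal packing routine above, assuming NumPy and scipy.spatial.Voronoi are imported as in the snippet; all parameter values below are invented for illustration (roughly a 1 mm^2 copper cross section).

# Invented parameters: 1 mm^2 required copper area, 5% strand insulation
# (as a fraction of strand radius) and 0.1 mm outer insulation.
wire_d, strand_d, centres = hexagonal_packing_cross_section(
    nseeds=15,      # seeds per side of the initial square grid
    Areq=1e-6,      # required copper cross section in m^2
    insu=0.05,      # strand insulation fraction
    out_insu=1e-4,  # outer insulation thickness in m
)
print(wire_d, strand_d, len(centres))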
def bk(): """ Returns an RGB object representing a black pixel. This function is created to make smile() more legible. """ return introcs.RGB(0,0,0)
1,876
def autoEpochToTime(epoch): """ Converts a long offset from Epoch value to a DBDateTime. This method uses expected date ranges to infer whether the passed value is in milliseconds, microseconds, or nanoseconds. Thresholds used are TimeConstants.MICROTIME_THRESHOLD divided by 1000 for milliseconds, as-is for microseconds, and multiplied by 1000 for nanoseconds. The value is tested to see if its ABS exceeds the threshold. E.g. a value whose ABS is greater than 1000 * TimeConstants.MICROTIME_THRESHOLD will be treated as nanoseconds. :param epoch: (long) - The long Epoch offset value to convert. :return: (io.deephaven.db.tables.utils.DBDateTime) null, if the input is equal to QueryConstants.NULL_LONG, otherwise a DBDateTime based on the inferred conversion. """ return _java_type_.autoEpochToTime(epoch)
1,877
def _write(fname, reqs): """Dump requirements back to a file in sorted format.""" reqs = sorted(reqs, key=sort_key) with open(fname, "w") as f: for r in reqs: f.write(f"{r}\n")
1,878
def compile_recursive_descent(file_lines, *args, **kwargs): """Given a file and its lines, recursively compile until no ksx statements remain""" visited_files = kwargs.get('visited_files', set()) # calculate a hash of the file_lines and check if we have already compiled # this one file_hash = hash_file_contents(file_lines) if len(visited_files) > RECURSION_DESCENT_LIMIT: msg = ( "Compiler appears to be in a circular reference loop, " "this is currently non-recoverable and is a known issue.\n\n" "See: https://github.com/LeonardMH/kos-scripts/issues/7 \n\n" "In the meantime check your library for files which import a " "file, where that file imports the original (A->B->A).\n\n" "You might also attempt using the 'from x import y' syntax which " "has slightly narrower scope." ) raise CircularImportError(msg) if file_hash in visited_files: # we have already compiled this file, no need to do so again return "" else: # we will now compile the file, mark that it has been visited visited_files.add(file_hash) # compile and split back out to individual lines file_oneline = compile_single_file_lines(file_lines, *args, **kwargs) file_lines = file_oneline.split('\n') # if there are no more ksx directives in the lines compiled we are done, # return the stringified compile result if not file_has_ksx_directive(file_lines): return file_oneline # if there are still more ksx directives in the lines compiled so far, run # again kwargs['visited_files'] = visited_files return compile_recursive_descent(file_lines, *args, **kwargs).rstrip() + '\n'
1,879
def cl_user(client): # pylint: disable=redefined-outer-name """yield client authenticated to role user""" yield client_in_roles(client, ['user'])
1,880
def majority_voting(masks, voting='hard', weights=None, threshold=0.5): """Soft Voting/Majority Rule mask merging; Signature based upon the Scikit-learn VotingClassifier (https://github.com/scikit-learn/scikit-learn/blob/2beed55847ee70d363bdbfe14ee4401438fba057/sklearn/ensemble/_voting.py#L141) Parameters ---------- masks : segmentations masks to merge, ndarray Expected shape is num_of_masks * 1 * h * w Accepts masks in range 0-1 (i.e apply sigmoid before passing to this function) voting : {'hard', 'soft'}, default='hard' If 'hard', uses predicted class labels for majority rule voting. Else if 'soft', predicts the class label based on the argmax of the sums of the predicted probabilities, which is recommended for an ensemble of well-calibrated classifiers. weights : array-like of shape (n_classifiers,), default=None Sequence of weights (`float` or `int`) to weight the occurrences of predicted class labels (`hard` voting) or class probabilities before averaging (`soft` voting). Uses uniform weights if `None`. threshold : for separating between the positive and negative class, default=0.5 Applied first in case of hard voting and applied last in case of soft voting """ assert len(masks.shape) == 4 if voting not in ('soft', 'hard'): raise ValueError(f"Voting must be 'soft' or 'hard'; got (voting= {voting})") for m in masks: assert (m >= 0.).all() and (m <= 1.).all() if voting == 'hard': masks = (masks >= threshold).astype(np.float32) if weights is None: weights = np.array([1] * masks.shape[0]) else: weights = np.array(weights) # Broadcasting starts with the trailing (i.e. rightmost) dimensions and works its way left, therefore we move the "mask" dimension to the right masks= np.transpose(masks, (1, 2, 3, 0)) masks = masks * weights masks= np.transpose(masks, (3, 0, 1, 2)) masks = masks.sum(axis=0) if voting == 'soft': masks = (masks >= (threshold * weights.sum())).astype(np.float32) elif voting == 'hard': # Same as doing a majority vote masks = (masks > (0.5 * weights.sum())).astype(np.float32) assert len(masks.shape) == 3 return masks.astype(np.float32)
1,881
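A small sanity check of the merging behaviour above, using two invented 1x2x2 probability masks; the numbers are made up for illustration.

import numpy as np

# Two predicted masks (num_of_masks=2, 1 channel, 2x2), probabilities in [0, 1].
masks = np.array([
    [[[0.9, 0.2],
      [0.6, 0.4]]],
    [[[0.8, 0.7],
      [0.3, 0.1]]],
], dtype=np.float32)

# Hard voting: each mask is binarised at the threshold first, then majority-voted.
print(majority_voting(masks, voting='hard'))

# Soft voting: probabilities are (optionally weighted-)summed before thresholding.
print(majority_voting(masks, voting='soft', weights=[2, 1], threshold=0.5))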
def send_msg(content):
    """
    Send a message.
    :param content: the message content
    :return:
    """
    (url, data, field) = __read_data()
    if not url or not data:
        print("url={0}, data={1}".format(url, data))
        return
    headers = {'Content-Type': 'application/json'}
    json_content = json.loads(data)
    json_content[field] = content
    print(json_content)
    response = requests.post(
        url=url, headers=headers, data=json.dumps(json_content))
    print(response.content)
1,882
def animate(zdata, xdata, ydata,
            conversionFactorArray, timedata,
            BoxSize,
            timeSteps=100,
            filename="particle"):
    """
    Animates the particle's motion given the z, x and y signal (in Volts)
    and the conversion factor (to convert between V and nm).

    Parameters
    ----------
    zdata : ndarray
        Array containing the z signal in volts with time.
    xdata : ndarray
        Array containing the x signal in volts with time.
    ydata : ndarray
        Array containing the y signal in volts with time.
    conversionFactorArray : ndarray
        Array of 3 values of conversion factors for z, x and y (in units of Volts/Metre)
    timedata : ndarray
        Array containing the time data in seconds.
    BoxSize : float
        The size of the box in which to animate the particle - in nm
    timeSteps : int, optional
        Number of time steps to animate
    filename : string, optional
        filename to create the mp4 under (<filename>.mp4)
    """
    timePerFrame = 0.203
    print("This will take ~ {} minutes".format(timePerFrame * timeSteps / 60))

    convZ = conversionFactorArray[0] * 1e-9
    convX = conversionFactorArray[1] * 1e-9
    convY = conversionFactorArray[2] * 1e-9

    ZBoxStart = -BoxSize  # 1/conv*(_np.mean(zdata)-0.06)
    ZBoxEnd = BoxSize  # 1/conv*(_np.mean(zdata)+0.06)
    XBoxStart = -BoxSize  # 1/conv*(_np.mean(xdata)-0.06)
    XBoxEnd = BoxSize  # 1/conv*(_np.mean(xdata)+0.06)
    YBoxStart = -BoxSize  # 1/conv*(_np.mean(ydata)-0.06)
    YBoxEnd = BoxSize  # 1/conv*(_np.mean(ydata)+0.06)

    FrameInterval = 1  # how many timesteps = 1 frame in animation

    a = 20
    b = 0.6 * a
    myFPS = 7
    myBitrate = 1000000

    fig = _plt.figure(figsize=(a, b))
    ax = fig.add_subplot(111, projection='3d')
    ax.set_title("{} us".format(timedata[0] * 1000000))
    ax.set_xlabel('X (nm)')
    ax.set_xlim([XBoxStart, XBoxEnd])
    ax.set_ylabel('Y (nm)')
    ax.set_ylim([YBoxStart, YBoxEnd])
    ax.set_zlabel('Z (nm)')
    ax.set_zlim([ZBoxStart, ZBoxEnd])
    ax.view_init(20, -30)  # ax.view_init(0, 0)

    def setup_plot():
        XArray = 1 / convX * xdata[0]
        YArray = 1 / convY * ydata[0]
        ZArray = 1 / convZ * zdata[0]
        scatter = ax.scatter(XArray, YArray, ZArray)
        return scatter,

    def animate(i):
        # print "\r {}".format(i),
        print("Frame: {}".format(i), end="\r")
        ax.clear()
        ax.view_init(20, -30)
        ax.set_title("{} us".format(int(timedata[i] * 1000000)))
        ax.set_xlabel('X (nm)')
        ax.set_xlim([XBoxStart, XBoxEnd])
        ax.set_ylabel('Y (nm)')
        ax.set_ylim([YBoxStart, YBoxEnd])
        ax.set_zlabel('Z (nm)')
        ax.set_zlim([ZBoxStart, ZBoxEnd])
        XArray = 1 / convX * xdata[i]
        YArray = 1 / convY * ydata[i]
        ZArray = 1 / convZ * zdata[i]
        scatter = ax.scatter(XArray, YArray, ZArray)
        ax.scatter([XArray], [0], [-ZBoxEnd], c='k', alpha=0.9)
        ax.scatter([-XBoxEnd], [YArray], [0], c='k', alpha=0.9)
        ax.scatter([0], [YBoxEnd], [ZArray], c='k', alpha=0.9)

        Xx, Yx, Zx, Xy, Yy, Zy, Xz, Yz, Zz = [], [], [], [], [], [], [], [], []

        for j in range(0, 30):

            Xlast = 1 / convX * xdata[i - j]
            Ylast = 1 / convY * ydata[i - j]
            Zlast = 1 / convZ * zdata[i - j]

            Alpha = 0.5 - 0.05 * j
            if Alpha > 0:
                ax.scatter([Xlast], [0 + j * 10], [-ZBoxEnd], c='grey', alpha=Alpha)
                ax.scatter([-XBoxEnd], [Ylast], [0 - j * 10], c='grey', alpha=Alpha)
                ax.scatter([0 - j * 2], [YBoxEnd], [Zlast], c='grey', alpha=Alpha)

            Xx.append(Xlast)
            Yx.append(0 + j * 10)
            Zx.append(-ZBoxEnd)

            Xy.append(-XBoxEnd)
            Yy.append(Ylast)
            Zy.append(0 - j * 10)

            Xz.append(0 - j * 2)
            Yz.append(YBoxEnd)
            Zz.append(Zlast)

            if j < 15:
                XCur = 1 / convX * xdata[i - j + 1]
                YCur = 1 / convY * ydata[i - j + 1]
                ZCur = 1 / convZ * zdata[i - j + 1]
                ax.plot([Xlast, XCur], [Ylast, YCur], [Zlast, ZCur], alpha=0.4)

        ax.plot_wireframe(Xx, Yx, Zx, color='grey')
        ax.plot_wireframe(Xy, Yy, Zy, color='grey')
        ax.plot_wireframe(Xz, Yz, Zz, color='grey')

        return scatter,

    anim = _animation.FuncAnimation(fig, animate, int(timeSteps / FrameInterval), init_func=setup_plot, blit=True)

    _plt.rcParams['animation.ffmpeg_path'] = '/usr/bin/ffmpeg'
    mywriter = _animation.FFMpegWriter(fps=myFPS, bitrate=myBitrate)  # , fps = myFPS, bitrate = myBitrate)
    anim.save('{}.mp4'.format(filename), writer=mywriter)
    return None
1,883
def name_of_decompressed(filename):
    """ Given a filename check if it is of a compressed type (any of
        ['.Z', '.gz', '.tar.gz', '.zip']); if indeed it is compressed
        return the name of the uncompressed file, else return the
        input filename.
    """
    dct = {
        '.Z': re.compile(r'\.Z$'),
        '.tar.gz': re.compile(r'\.tar\.gz$'),
        '.gz': re.compile(r'\.gz$'),
        '.zip': re.compile(r'\.zip$')
    }
    ctype = find_os_compression_type(filename)
    if ctype is None:
        return filename
    try:
        return re.sub(dct[ctype], '', filename)
    except KeyError:
        raise RuntimeError('[ERROR] decompress:name_of_decompressed Failed!')
1,884
def sample_distribution(distribution): """Sample one element from a distribution assumed to be an array of normalized probabilities. """ r = random.uniform(0, 1) s = 0 for i in range(len(distribution)): s += distribution[i] if s >= r: return i return len(distribution) - 1
1,885
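A quick check of the sampler above: drawing many samples from an invented normalized distribution should roughly reproduce its probabilities (assumes the module-level random import used by the snippet).

import random
from collections import Counter

distribution = [0.2, 0.5, 0.3]  # invented, already normalized

random.seed(0)  # repeatable demonstration
counts = Counter(sample_distribution(distribution) for _ in range(10000))
print({i: counts[i] / 10000 for i in range(len(distribution))})
# e.g. {0: ~0.20, 1: ~0.50, 2: ~0.30}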
def benchmark(func): """Decorator to mark a benchmark.""" BENCHMARKS[func.__name__] = func return func
1,886
def write_gvecs(fp, gvecs, kpath='/electrons/kpoint_0'): """ fill the electrons/kpoint_0/gvectors group in wf h5 file Args: fp (h5py.File): hdf5 file object gvecs (np.array): PW basis as integer vectors kpath (str, optional): kpoint group to contain gvecs, default is '/electrons/kpoint_0' Example: >>> fp = h5py.File('pwscf.pwscf.h5', 'w') >>> write_gvecs(fp, gvecs) >>> fp.close() """ fp.require_group(kpath) kgrp = fp[kpath] kgrp.create_dataset('gvectors', data=gvecs)
1,887
def do_positive_DFT(data_in, tmax):
    """
    Do Discrete Fourier transformation and take POSITIVE frequency component part.

    Args:
        data_in (array): input data.
        tmax (int): number of samples in data_in.

    Returns:
        data_w (array): the Discrete Fourier Transform sample frequencies, POSITIVE frequency component part.
        data_s (array): output array with POSITIVE frequency component part.
    """
    data_s = np.fft.fft(data_in)
    data_w = np.fft.fftfreq(tmax)

    # only take the positive frequency components
    return data_w[0:tmax//2], data_s[0:tmax//2]
1,888
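A short sketch of the DFT helper above using an invented 8 Hz test tone; note that np.fft.fftfreq here returns frequencies in cycles per sample, so they are rescaled by the (assumed) sampling rate.

import numpy as np

fs = 128            # invented sampling rate in Hz
tmax = 128          # number of samples
t = np.arange(tmax) / fs
signal = np.sin(2 * np.pi * 8 * t)   # 8 Hz test tone

freqs, spectrum = do_positive_DFT(signal, tmax)

# fftfreq is per-sample, so multiply by fs to get Hz; the peak sits at 8 Hz.
peak_hz = freqs[np.argmax(np.abs(spectrum))] * fs
print(peak_hz)  # 8.0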
def test_load_extension_valid(import_path: str) -> None: """It loads the extension.""" extension = load_extension(import_path) assert issubclass(extension, jinja2.ext.Extension)
1,889
def scroll_down(driver): """Scrolling the page for pages with infinite scrolling""" loading_thread = threading.Thread(target=loading) loading_thread.start() # Get scroll height. last_height = driver.execute_script("return document.body.scrollHeight") while True: # Scroll down to the bottom. driver.execute_script("window.scrollTo(0, document.body.scrollHeight);") # Wait to load the page. time.sleep(5) # Calculate new scroll height and compare with last scroll height. new_height = driver.execute_script("return document.body.scrollHeight") if new_height == last_height: break last_height = new_height loading_thread.loading_loop = False loading_thread.join()
1,890
def service(base_app, location): """Service fixture.""" return base_app.extensions["invenio-records-lom"].records_service
1,891
def test_is_divisible_by_6(s, result): """Test function returns expected result.""" from divisible_by_6 import is_divisible_by_6 assert is_divisible_by_6(s) == result
1,892
def check_file_content(path, expected_content): """Check file has expected content. :param str path: Path to file. :param str expected_content: Expected file content. """ with open(path) as input: return expected_content == input.read()
1,893
def get_arguments(deluge=False): """Retrieves CLI arguments from the 'addmedia' script and uses get_parser() to validate them. Returns the full file path to the config file in use and a dict of validated arguments from the MHParser object. """ # Check for deluge if deluge: return get_deluge_arguments() # Get parser parser = get_parser() # If no args, show help if len(sys.argv) == 1: parser.print_help() sys.exit(1) # Get validated args from parser new_args = parser.parse_args().__dict__ # Remove config to return separately config = new_args.pop('config') return config, new_args
1,894
def verify_apikey(payload,
                  raiseonfail=False,
                  override_authdb_path=None,
                  override_permissions_json=None,
                  config=None):
    """Checks if an API key is valid.

    This version does not require a session.

    Parameters
    ----------

    payload : dict
        This dict contains a single key:

        - apikey_dict: the decrypted and verified API key info dict from the
          frontend.
        - user_id: the user ID of the person wanting to verify this key.
        - user_role: the user role of the person wanting to verify this key.

    raiseonfail : bool
        If True, will raise an Exception if something goes wrong.

    override_authdb_path : str or None
        If given as a str, is the alternative path to the auth DB.

    override_permissions_json : str or None
        If given as a str, is the alternative path to the permissions JSON to
        use. This is used to check if the user_id is allowed to actually verify
        ("read") an API key.

    config : SimpleNamespace object or None
        An object containing systemwide config variables as attributes. This is
        useful when the wrapping function needs to pass in some settings
        directly from environment variables.

    Returns
    -------

    dict
        The dict returned is of the form::

            {'success': True if API key is OK and False otherwise,
             'messages': list of str messages if any}

    """

    for key in ('reqid', 'pii_salt'):
        if key not in payload:
            LOGGER.error(
                "Missing %s in payload dict. Can't process this request." % key
            )
            return {
                'success': False,
                'failure_reason': (
                    "invalid request: missing '%s' in request" % key
                ),
                'apikey': None,
                'expires': None,
                'messages': ["Invalid API key request."],
            }

    for key in ('apikey_dict', 'user_id', 'user_role'):
        if key not in payload:
            LOGGER.error(
                '[%s] Invalid API key request, missing %s.' %
                (payload['reqid'], key)
            )
            return {
                'success': False,
                'failure_reason': (
                    "invalid request: missing '%s' in request" % key
                ),
                'messages': ["Some required keys are missing from payload."]
            }

    apikey_dict = payload['apikey_dict']
    user_id = payload['user_id']
    user_role = payload['user_role']

    # check if the user is allowed to read the presented API key
    apikey_verify_allowed = check_user_access(
        {'user_id': user_id,
         'user_role': user_role,
         'action': 'view',
         'target_name': 'apikey',
         'target_owner': apikey_dict['uid'],
         'target_visibility': 'private',
         'target_sharedwith': None,
         'reqid': payload['reqid'],
         'pii_salt': payload['pii_salt']},
        raiseonfail=raiseonfail,
        override_permissions_json=override_permissions_json,
        override_authdb_path=override_authdb_path
    )

    if not apikey_verify_allowed['success']:

        LOGGER.error(
            "[%s] Invalid API key verification request. "
            "from user_id: %s, role: %s. The API key presented is "
            "not readable by this user." %
            (payload['reqid'],
             pii_hash(user_id, payload['pii_salt']),
             pii_hash(user_role, payload['pii_salt']))
        )
        return {
            'success': False,
            'failure_reason': (
                "originating user is not allowed to operate on this API key"
            ),
            'messages': ["API key verification failed. "
                         "You are not allowed to operate on this API key."]
        }

    # this checks if the database connection is live
    currproc = mp.current_process()
    engine = getattr(currproc, 'authdb_engine', None)

    if override_authdb_path:
        currproc.auth_db_path = override_authdb_path

    if not engine:
        currproc.authdb_engine, currproc.authdb_conn, currproc.authdb_meta = (
            authdb.get_auth_db(
                currproc.auth_db_path,
                echo=raiseonfail
            )
        )

    apikeys = currproc.authdb_meta.tables['apikeys_nosession']

    # the apikey sent to us must match the stored apikey's properties:
    # - token
    # - userid
    # - expired must be in the future
    # - issued must be in the past
    # - not_valid_before must be in the past
    dt_utcnow = datetime.utcnow()

    sel = select([
        apikeys.c.apikey,
        apikeys.c.expires,
    ]).select_from(apikeys).where(
        apikeys.c.apikey == apikey_dict['tkn']
    ).where(
        apikeys.c.user_id == apikey_dict['uid']
    ).where(
        apikeys.c.user_role == apikey_dict['rol']
    ).where(
        apikeys.c.expires > dt_utcnow
    ).where(
        apikeys.c.issued < dt_utcnow
    ).where(
        apikeys.c.not_valid_before < dt_utcnow
    )
    result = currproc.authdb_conn.execute(sel)
    row = result.fetchone()
    result.close()

    if row is not None and len(row) != 0:

        LOGGER.info(
            "[%s] No-session API key verified successfully. "
            "user_id: %s, role: '%s', audience: '%s', subject: '%s', "
            "apiversion: %s, expires on: %s" %
            (payload['reqid'],
             pii_hash(apikey_dict['uid'], payload['pii_salt']),
             apikey_dict['rol'],
             apikey_dict['aud'],
             apikey_dict['sub'],
             apikey_dict['ver'],
             apikey_dict['exp'])
        )
        return {
            'success': True,
            'messages': [(
                "No-session API key verified successfully. Expires: %s." %
                row['expires'].isoformat()
            )]
        }

    else:

        LOGGER.error(
            "[%s] No-session API key verification failed. Failed key "
            "user_id: %s, role: '%s', audience: '%s', subject: '%s', "
            "apiversion: %s, expires on: %s" %
            (payload['reqid'],
             pii_hash(apikey_dict['uid'], payload['pii_salt']),
             apikey_dict['rol'],
             apikey_dict['aud'],
             apikey_dict['sub'],
             apikey_dict['ver'],
             apikey_dict['exp'])
        )
        return {
            'success': False,
            'failure_reason': (
                "key validation failed, "
                "provided key does not match stored key or has expired"
            ),
            'messages': [(
                "API key could not be verified."
            )]
        }
1,895
def merge_image_data(dir_dict, output_image_file, logg):
    """
    Merge image data in dir_dict.

    Parameters
    ----------
    dir_dict : dict
        dictionary containing pairs of directories and associated files
    output_image_file : str
        output image file string
    logg : logging object
        logger passed through to run_cmd for the external "montage" call

    Returns
    -------
    (err_code, file_list)
    err_code : int
        Non-zero value indicates error code, or zero on success.
    file_list : list of files merged
    """
    file_list = []
    for direct in dir_dict:
        file_name = dir_dict[direct]
        if file_name != "":
            file_list.append(os.path.join(direct, file_name))
    if len(file_list) > 0:
        command_list = ["montage"] + file_list + ["-mode"] + ["Concatenate"] + [output_image_file]
        ret = run_cmd(command_list, logg)
        return (ret, file_list)
    else:
        return (-1, file_list)
1,896
def save_model(file_name_base, model): """Save and convert Keras model""" keras_file = f'{file_name_base}.h5' fdeep_file = f'{file_name_base}.json' print(f'Saving {keras_file}') model.save(keras_file, include_optimizer=False) print(f'Converting {keras_file} to {fdeep_file}.') convert_model.convert(keras_file, fdeep_file) print(f'Done converting {keras_file} to {fdeep_file}.')
1,897
def case_mc2us(x): """ mixed case to underscore notation """ return case_cw2us(x)
1,898
def _parse_arguments(): """ Constructs and parses the command line arguments for eg. Returns an args object as returned by parser.parse_args(). """ parser = argparse.ArgumentParser( description='eg provides examples of common command usage.' ) parser.add_argument( '-v', '--version', action='store_true', help='Display version information about eg' ) parser.add_argument( '-f', '--config-file', help='Path to the .egrc file, if it is not in the default location.' ) parser.add_argument( '-e', '--edit', action='store_true', help="""Edit the custom examples for the given command. If editor-cmd is not set in your .egrc and $VISUAL and $EDITOR are not set, prints a message and does nothing.""" ) parser.add_argument( '--examples-dir', help='The location to the examples/ dir that ships with eg' ) parser.add_argument( '-c', '--custom-dir', help='Path to a directory containing user-defined examples.' ) parser.add_argument( '-p', '--pager-cmd', help='String literal that will be invoked to page output.' ) parser.add_argument( '-l', '--list', action='store_true', help='Show all the programs with eg entries.' ) parser.add_argument( '--color', action='store_true', dest='use_color', default=None, help='Colorize output.' ) parser.add_argument( '-s', '--squeeze', action='store_true', default=None, help='Show fewer blank lines in output.' ) parser.add_argument( '--no-color', action='store_false', dest='use_color', help='Do not colorize output.' ) parser.add_argument( 'program', nargs='?', help='The program for which to display examples.' ) args = parser.parse_args() if len(sys.argv) < 2: # Too few arguments. We can't specify this using argparse alone, so we # have to manually check. parser.print_help() parser.exit() elif not args.version and not args.list and not args.program: parser.error(_MSG_BAD_ARGS) else: return args
1,899