Columns: content (string, lengths 22 to 815k), id (int64, 0 to 4.91M)
def is_permutation_matrix(m):
    """
    Test whether a numpy array is a `permutation matrix`_.

    .. _permutation_matrix: https://en.wikipedia.org/wiki/Permutation_matrix

    Args:
        m (np.ndarray): The matrix.

    Returns:
        (bool): True | False.
    """
    m = np.asanyarray(m)
    return (m.ndim == 2 and m.shape[0] == m.shape[1] and
            (m.sum(axis=0) == 1).all() and
            (m.sum(axis=1) == 1).all() and
            ((m == 1) | (m == 0)).all())
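A minimal usage sketch for the check above, assuming the snippet's module imports `numpy as np` as the body implies:

```python
import numpy as np

# The identity and any row permutation of it are permutation matrices.
assert is_permutation_matrix(np.eye(3))
assert is_permutation_matrix(np.eye(3)[[2, 0, 1]])

# Row/column sums of 1 are not enough; every entry must also be 0 or 1.
assert not is_permutation_matrix(np.full((2, 2), 0.5))
```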
3,900
def bilogplot(V, f0, fbin, x, y, **fmt): """Plot the spectrum of a band-pass modulator in dB. The plot is a logarithmic plot, centered in 0, corresponding to f0, extending to negative frequencies, with respect to the center frequencies and to positive frequencies. The plot employs a logarithmic x-axis transform far from the origin and a linear one close to it, allowing the x-axis to reach zero and extend to negative values as well. .. note:: This is implemented in a slightly different way from The MATLAB Delta Sigma Toolbox, where all values below ``xmin`` are clipped and the scale is always logarithmic. It our implementation, no clipping is done and below ``xmin`` the data is simply plotted with a linear scale. For this reason slightly different plots may be generated. **Parameters:** V : 1d-ndarray or sequence Hann-windowed FFT f0 : int Bin number of center frequency fbin : int Bin number of test tone x : 3-elements sequence-like x is a sequence of three *positive* floats: ``xmin``, ``xmax_left``, ``xmax_right``. ``xmin`` is the minimum value of the logarithmic plot range. ``xmax_left`` is the length of the plotting interval on the left (negative) side, ``xmax_right`` is its respective on the right (positive) side. y : 3-elements sequence-like y is a sequence of three floats: ``ymin``, ``ymax``, ``dy``. ``ymin`` is the minimum value of the y-axis, ``ymax`` its maximum value and ``dy`` is the ticks spacing. .. note:: The MATLAB Delta Sigma toolbox allows for a fourth option ``y_skip``, which is the ``incr`` value passed to MATLAB's ``axisLabels``. No such thing is supported here. A warning is issued if ``len(v) == 4``. Additional keyword parameters ``**fmt`` will be passed to matplotlib's ``semilogx()``. The FFT is smoothed before plotting and converted to dB. See :func:`logsmooth` for details regarding the algorithm used. **Returns:** *None* .. plot:: from __future__ import division from deltasigma import synthesizeNTF, simulateDSM from deltasigma import calculateSNR, ds_hann, bilogplot import pylab as plt import numpy as np f0 = 1./8 OSR = 64 order = 8 N = 8192 H = synthesizeNTF(order, OSR, 1, 1.5, f0) fB = int(np.ceil(N/(2. * OSR))) ftest = int(np.round(f0*N + 1./3 * fB)) u = 0.5*np.sin(2*np.pi*ftest/N*np.arange(N)) v, xn, xmax, y = simulateDSM(u, H) spec = np.fft.fft(v*ds_hann(N))/(N/4) X = spec[:int(N/2) + 1] plt.figure() bilogplot(X, int(f0*N), ftest, (.03, .3, .3), (-140, 0, 10)) """ V = carray(V) if len(V.shape) > 1: if np.prod(V.shape) > max(V.shape): raise ValueError("The input value V should have only one" + " non-unitary dimension.") V = V.squeeze() Xl = V[f0::-1] Xr = V[f0:] N = V.shape[0] - 1 fbin = abs(fbin - f0) fl, pl = _logsmooth2(Xl, fbin) fr, pr = _logsmooth2(Xr, fbin) p = np.concatenate((pl[::-1], pr)) f = np.concatenate((-fl[::-1], fr)) plt.plot(f, p, **fmt) plt.xscale('symlog', linthreshx=x[0], subsx=np.logspace(10**int(np.ceil(np.log10(x[0]))), 10**int(1+np.ceil(np.log10(max(x[2], x[1]))))) ) ax = plt.gca() ax.set_xlim([-x[1], x[2]]) ax.set_ylim([y[0], y[1]]) plt.grid(True) ytix = list(range(y[0], y[1] + 1, y[2])) ax.yaxis.set_ticks(ytix) # we do not support axis labels # set_(gca,'YTickLabel', axisLabels(ytix, y[3])) # if len(y) == 4 and not y[3] is None: warn("Specifying y_skip is not currently supported and " + "it will be ignored. Sorry!") return
3,901
def reproduce(param1: Param("The message", str)):
    """Function for reproducing results related to the library."""
    pass
3,902
def create_security_role(connection, body, error_msg=None):
    """Create a new security role.

    Args:
        connection: MicroStrategy REST API connection object
        body: JSON-formatted definition of the dataset. Generated by
            `utils.formjson()`.
        error_msg (string, optional): Custom Error Message for Error Handling

    Returns:
        Complete HTTP response object.
    """
    return connection.session.post(
        url=f'{connection.base_url}/api/securityRoles',
        headers={'X-MSTR-ProjectID': None},
        json=body,
    )
3,903
def dbm_to_w(dbm):
    """Convert dBm to W."""
    return 10 ** (dbm / 10.) * sc.milli
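A quick sanity check of the conversion, assuming `scipy.constants` is imported as `sc` (that is where `sc.milli` comes from): 0 dBm is 1 mW and 30 dBm is 1 W.

```python
import scipy.constants as sc  # sc.milli == 1e-3

assert dbm_to_w(0) == sc.milli           # 0 dBm -> 1 mW
assert abs(dbm_to_w(30) - 1.0) < 1e-12   # 30 dBm -> 1 W
```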
3,904
def print_usage( program_name, file_handle=sys.stdout ): """ Prints the script's usage to standard output. Takes 2 arguments: program_name - Name of the program currently executing. file_handle - File handle to print to. If omitted, defaults to standard output. Returns nothing. """ usage_str = \ """{program_name:s} [-f] [-h] [-l <path>,<width>,<height>] [-L <labeling_strategy>] <playlist_path> <experiment> <variable>[,<variable>[...]] <time_start>:<time_stop> <z_start>:<z_stop> <data_root> <url_prefix> <component_count> Creates a JSON playlist suitable for importing into a new Scalabel.ai labeling project at <playlist_path>. The playlist generated contains a sequence of "video frames" representing XY slices from an IWP data set. Existing IWP labels may be incorporated into the playlist to support label refinement and review, rather than initial label creation. The sequence of XY slices generated depends on the <labeling_strategy> requested. Depending on the strategy specified, each frame's video name is constructed such that Scalabel.ai's video labeling project partitions a playlist into logical chunks that are each reasonably ordered (e.g. all time steps for fixed XY slice or all XY slices for a fixed time step). Supported strategies are the following: {no_order:12s}- No order is specified. {xy_slices:12s}- Frames are sorted by location within the dataset. Labelers see all time steps for a specific XY slice. {z_stacks:12s}- Frames are sorted by time within the dataset. Labelers see all XY slices for a specific time step. {variables:12s}- Frames are sorted by time and location within the dataset. Labelers see each of the variables for a XY slice at a specific time step. Playlist frames have URLs comprised of <url_prefix> prepended to supplied <data_root> with <component_count>-many leading path components removed. This allows replacement of some portion of the labeling data's path with the Scalabel.ai's web server address like so: <data_root> is /data/iwp/R5F04/ <component_count> is 2 Scalabel.ai's web root is /data/iwp/ <url_prefix> is http://localhost:8686/items Generated frames will have URLs starting with: http://localhost:8686/items/R5F04/ To avoid headaches with Scalabel.ai's tool, Each frame's underlying data's path is checked for accessibility before writing the playlist JSON. Frame without data generate an error message on standard error and prevent the playlist from being written. The generate frames' metadata is structured such that, when exported from the labeling tool, the Scalabel labels may be extracted and converted into IWP labels for post-processing and configuration management. Individual frames are programmaticaly named using <experiment>, <variable>, <time_index>, and <z_index>, so that it survives the round trip into and out of the Scalabel.ai tool. The command line options shown above are described below: -f Force creation of the playlist JSON regardless of whether the frames' underlying data exists or not. If a datum doesn't exist, a warning is printed to standard error. -h Print this help message and exit. -l <path>,<width>,<height> Comma-delimited parameters specifying the serialized IWP labels to incorporate into the created playlist. Labels with normalized coordinates are loaded from <path> and are scaled to <width> x <height> pixels. -L <labeling_strategy> Strategy for sequencing the generated playlist. Must be one of: {no_order:s}, {xy_slices:s}, {z_stacks:s}, {variables:s}. See the description above for details. 
""".format( program_name=program_name, no_order="'{:s}'".format( iwp.scalabel.LabelingStrategyType.NO_ORDER.name.lower() ), xy_slices="'{:s}'".format( iwp.scalabel.LabelingStrategyType.XY_SLICES.name.lower() ), z_stacks="'{:s}'".format( iwp.scalabel.LabelingStrategyType.Z_STACKS.name.lower() ), variables="'{:s}'".format( iwp.scalabel.LabelingStrategyType.VARIABLES.name.lower() ) ) print( usage_str, file=file_handle )
3,905
def lml(alpha, beta, Phi, Y):
    """
    4 marks

    :param alpha: float
    :param beta: float
    :param Phi: array of shape (N, M)
    :param Y: array of shape (N, 1)
    :return: the log marginal likelihood, a scalar
    """
    N = len(Phi)
    M = len(Phi[0])
    part1 = (-N*0.5)*np.log(2*np.pi)
    wholePhi = np.dot(np.dot(Phi, alpha*np.identity(M)), Phi.T)
    wholeBeta = beta*np.identity(N)
    part2 = - 0.5*np.log(np.linalg.det(wholePhi + wholeBeta))
    part3 = -0.5*np.dot(np.dot(Y.T, inv((wholePhi + wholeBeta))), Y)
    logFunc = part1 + part2 + part3
    return logFunc[0][0]
3,906
def convert_env_var(var_name: str, *, cast_type: Callable[..., Any] = float, default: Any = None) -> Any:
    """
    Attempts to read an environment variable value and cast it to a type.
    For example it permits getting numeric value(s) from os.environ

    :param var_name: Key to lookup from environment variables.
    :param cast_type: The callable instance to run the env string through if exists.
    :param default: Default value to return if the specified var_name does not exist in os.environ
    """
    try:
        return cast_type(os.environ.get(var_name, default))
    except (TypeError, ValueError):
        raise EnvironFetchException(f"Unable to cast to: {type(cast_type)}")
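A small usage sketch, assuming the module-level imports (`os`, `Callable`, `Any`, `EnvironFetchException`) are in place; the environment variable names below are made up for illustration:

```python
import os

os.environ["TIMEOUT_SECONDS"] = "2.5"              # hypothetical variable
assert convert_env_var("TIMEOUT_SECONDS") == 2.5   # default cast_type is float
assert convert_env_var("RETRY_COUNT", cast_type=int, default=3) == 3  # falls back to default
```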
3,907
def balance_set(X, Y, adr_labels_size, nonadr_labels_size): """balances the set by doing up- and down -sampling to converge into the same class size # Arguments X - set samples Y - set labels adr_labels_size - ADR_MENTION_CLASS size nonadr_labels_size - NON_ADR_MENTION_CLASS size # Returns new_X - new balanced samples new_Y - new labels corresponding to new_X """ print("Performing Class Balancing...") adr_samples_needed = nonadr_labels_size - adr_labels_size new_X = [] new_Y = [] adr_labels_size = 0 nonadr_labels_size = 0 for index, example in enumerate(X): if adr_samples_needed > 0: if Y[index] == ADR_MENTION_CLASS_LABEL: new_X.append(example) # add original 'ADR' sample new_Y.append(ADR_MENTION_CLASS_LABEL) new_X.append(example) # add duplicate 'ADR' sample to perform Over-Sampling new_Y.append(ADR_MENTION_CLASS_LABEL) adr_labels_size += 2 adr_samples_needed -= 1 else: # we don't add original 'No ADR Mention' sample to perform Under-Sampling adr_samples_needed -= 1 else: if Y[index] == ADR_MENTION_CLASS_LABEL: adr_labels_size += 1 else: nonadr_labels_size += 1 new_X.append(example) # add original sample new_Y.append(Y[index]) # add original label print(" Updated dataset size: {}".format(len(new_X))) print(" {} class size: {}".format(ADR_MENTION_CLASS_NAME, adr_labels_size)) print(" {} class size: {}".format(NON_ADR_MENTION_CLASS_NAME, nonadr_labels_size)) return new_X, new_Y
3,908
def load_det_lcia(result_dir, method, act_code, det_lcia_dict=None):
    """Return precalculated deterministic LCIA score"""
    result_dir = Path(_check_result_dir(result_dir))
    method = _check_method(method)
    if not det_lcia_dict:
        det_lcia_dict = _get_det_lcia_dict(result_dir, method)
    if not act_code in det_lcia_dict:
        raise ValueError("No deterministic result for activity with code {} "
                         "in deterministic LCIA dictionary".format(act_code))
    return det_lcia_dict[act_code]
3,909
def get_geometry(location, geolevel):
    """
    Get geometry of a single location code/name
    """
    if not utils.is_number(location) and location != "BR":
        assert geolevel, "You need to specify which geographic level this location is"
        location = ibgetools.ibge_encode(location, geolevel)
    if location == -1:
        return shapely.geometry.Polygon([])
    url = build_url(location)
    geojson = get_geojson(url)
    features = utils.get_features(geojson)
    return shapely.geometry.shape(features[0]["geometry"])
3,910
def render_to_string(template, context={}, processors=None):
    """
    A function for template rendering adding useful variables to context
    automatically, according to the CONTEXT_PROCESSORS settings.
    """
    if processors is None:
        processors = ()
    else:
        processors = tuple(processors)
    for processor in get_standard_processors() + processors:
        context.update(processor(get_request()))
    template = local.app.jinja2_env.get_template(template)
    return template.render(context)
3,911
def find_node_names(structure):
    """ Return the names of the nodes for the structure """
    # Look through all of the items in the structure for names
    # Check through each of the lists and sub-lists
    names = set()
    for i in xrange(len(structure)):
        if isinstance(structure[i], basestring):
            # do not return joins
            if not structure[i] in [AND_DELIMITER, OR_DELIMITER, " "]:
                names.add(structure[i])
        elif isinstance(structure[i], list):
            names.update(find_node_names(structure[i]))
    return names
3,912
def wind(path_to_shapes_of_land_surface, path_to_shapes_of_water_surface,
         path_to_onshore_output, path_to_offshore_output, config):
    """Create wind on- and offshore simulation input for renewables.ninja."""
    write_parameters(
        bounds=config["scope"]["bounds"],
        resolution=config["parameters"]["ninja"]["resolution-grid"],
        path_to_shapes=path_to_shapes_of_land_surface,
        hub_height=config["parameters"]["ninja"]["hub-height"]["onshore"],
        turbine=config["parameters"]["ninja"]["turbine"]["onshore"],
        path_to_output=path_to_onshore_output
    )
    write_parameters(
        bounds=config["scope"]["bounds"],
        resolution=config["parameters"]["ninja"]["resolution-grid"],
        path_to_shapes=path_to_shapes_of_water_surface,
        hub_height=config["parameters"]["ninja"]["hub-height"]["offshore"],
        turbine=config["parameters"]["ninja"]["turbine"]["offshore"],
        path_to_output=path_to_offshore_output
    )
3,913
def run_calcs(run_id, year, no_ef_countries, export_data=True, include_TD_losses=True,
              BEV_lifetime=180000, ICEV_lifetime=180000, flowtrace_el=True, allocation=True,
              production_el_intensity=679, incl_ei=False, energy_sens=False):
    """Run all electricity mix and vehicle calculations and exports results."""
    # Korean el-mix 679 g CO2/kWh, from ecoinvent
    fp = os.path.curdir
    (production, trades, trade_ef, country_total_prod_disagg,
     country_total_cons_disagg, g_raw, C) = load_prep_el_data(fp, year)
    codecheck_file, elmixes, trade_only, country_el, CFEL, CFCI = el_calcs(
        flowtrace_el, run_id, fp, C, production, country_total_prod_disagg,
        country_total_cons_disagg, g_raw, trades, trade_ef, include_TD_losses,
        incl_ei, export_data)  # Leontief electricity calculations
    results_toSI, ICEV_total_impacts, ICEV_prodEOL_impacts, ICEV_op_int = BEV_calcs(
        fp, country_el, production, elmixes, BEV_lifetime, ICEV_lifetime,
        production_el_intensity, CFCI, allocation, energy_sens)
    SI_fp = export_SI(run_id, results_toSI, production, trades, C, CFEL, no_ef_countries)
    pickle_results(run_id, results_toSI, CFEL, ICEV_total_impacts, codecheck_file, export_data)
    return (results_toSI['BEV footprint'].xs('Consumption mix', level=1, axis=1),
            ICEV_prodEOL_impacts, ICEV_op_int, SI_fp)
3,914
def handle_question():
    """Save response and redirect to next question."""
    # get the response choice
    choice = request.form['answer']

    # add this response to the session
    responses = session[RESPONSES_KEY]
    responses.append(choice)
    session[RESPONSES_KEY] = responses

    if (len(responses) == len(survey.questions)):
        # They've answered all the questions! Thank them.
        return redirect("/complete")
    else:
        return redirect(f"/questions/{len(responses)}")
3,915
def makeNotePlayer(seq: Sequencer, out: PortInfo
                   ) -> Callable[[int, bool], None]:
    """Returns a callable object that plays midi notes on a port."""

    def playNote(note: int, enabled: bool) -> None:
        if enabled:
            seq.sendEvent(NoteOn(0, 0, note, 127), out)
        else:
            seq.sendEvent(NoteOff(0, 0, note, 0), out)

    return playNote
3,916
def se_resnet152(**kwargs):
    """TODO: Add Doc"""
    return _resnet("se_resnet152", **kwargs)
3,917
def file_to_attachment(filename):
    """
    Convert a file to attachment
    """
    with open(filename, 'rb') as _file:
        return {'_name': filename,
                'content': base64.b64encode(_file.read())}
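A round-trip check, assuming `base64` is imported in the snippet's module; the temporary file stands in for a real attachment:

```python
import base64
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b"hello attachment")

attachment = file_to_attachment(tmp.name)
assert attachment["_name"] == tmp.name
assert base64.b64decode(attachment["content"]) == b"hello attachment"
```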
3,918
def setup(bot: commands.Bot) -> None:
    """Load the Animals cog."""
    bot.add_cog(Animals(bot))
3,919
def test_r2_7():
    """CCSD T2 Residual [Vvvov,T1] (7)"""
    T1 = w.op("t", ["v+ o"])
    Vvvov = w.op("v", ["v+ v+ v o"])
    wt = w.WickTheorem()
    sum = wt.contract(w.rational(1), w.commutator(Vvvov, T1), 4, 4)
    val = sum.to_manybody_equation("r")["oo|vv"][0].rhs_expression()
    val2 = w.expression("-1/2 t^{o0}_{v2} v^{o1,v2}_{v0,v1}")
    print_comparison(val, val2)
    assert val == val2
3,920
def test_version() -> None:
    """Test version"""
    assert __version__ == "0.1.1"
3,921
def ratio_selection( strain_lst, ratio_lst, pressure_lst, temperature_lst, ratio_boundary, debug_plot=True, ): """ Args: strain_lst: ratio_lst: pressure_lst: temperature_lst: ratio_boundary: debug_plot: Returns: """ if debug_plot: plt.plot(strain_lst, ratio_lst) plt.axhline(0.5 + ratio_boundary, color="red", linestyle="--") plt.axhline(0.5, color="black", linestyle="--") plt.axhline(0.5 - ratio_boundary, color="red", linestyle="--") plt.xlabel("Strain") plt.ylabel("ratio solid vs. liquid") rat_lst, rat_col_lst = [], [] for rat in ratio_lst: if (0.5 - ratio_boundary) < rat < (0.5 + ratio_boundary): rat_lst.append(rat) elif len(rat_lst) != 0: rat_col_lst.append(rat_lst) rat_lst = [] if len(rat_lst) != 0: rat_col_lst.append(rat_lst) if len(rat_col_lst) != 0: rat_max_ind = np.argmax([len(lst) for lst in rat_col_lst]) ratio_ind = [r in rat_col_lst[rat_max_ind] for r in ratio_lst] strain_value_lst = np.array(strain_lst)[ratio_ind] ratio_value_lst = np.array(ratio_lst)[ratio_ind] pressure_value_lst = np.array(pressure_lst)[ratio_ind] temperature_value_lst = np.array(temperature_lst)[ratio_ind] if debug_plot: plt.axvline(np.min(strain_value_lst), color="blue", linestyle="--") plt.axvline(np.max(strain_value_lst), color="blue", linestyle="--") plt.show() if np.mean(ratio_value_lst) > 0.5: return ( strain_value_lst, ratio_value_lst, pressure_value_lst, temperature_value_lst, 1, ) else: return ( strain_value_lst, ratio_value_lst, pressure_value_lst, temperature_value_lst, -1, ) else: if np.mean(ratio_lst) > 0.5: return [], [], [], [], 1 else: return [], [], [], [], -1
3,922
async def test_import(hass, client): """Test we can import yaml config.""" assert client with patch("homeassistant.components.webostv.async_setup_entry", return_value=True): result = await hass.config_entries.flow.async_init( DOMAIN, context={CONF_SOURCE: config_entries.SOURCE_IMPORT}, data=MOCK_YAML_CONFIG, ) assert result["type"] == RESULT_TYPE_CREATE_ENTRY assert result["title"] == TV_NAME assert result["data"][CONF_HOST] == MOCK_YAML_CONFIG[CONF_HOST] assert result["data"][CONF_CLIENT_SECRET] == MOCK_YAML_CONFIG[CONF_CLIENT_SECRET] assert result["result"].unique_id == MOCK_YAML_CONFIG[CONF_UNIQUE_ID] with patch("homeassistant.components.webostv.async_setup_entry", return_value=True): result = await hass.config_entries.flow.async_init( DOMAIN, context={CONF_SOURCE: config_entries.SOURCE_IMPORT}, data=MOCK_YAML_CONFIG, ) assert result["type"] == RESULT_TYPE_ABORT assert result["reason"] == "already_configured"
3,923
def close(device_handle):
    """
    reset the instrument
    """
    dwf.FDwfDigitalInReset(device_handle)
    return
3,924
def diff_mean(rolling_window, axis=-1):
    """For M5 purposes, used on an object generated by the rolling_window
    function. Returns the mean of the first difference of a window of sales."""
    return np.diff(rolling_window, axis=axis).mean(axis=axis)
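The `rolling_window` helper is not shown in this snippet, but any 2-D array in which each row is one window exercises the same code path (assuming `numpy as np`):

```python
import numpy as np

windows = np.array([[1, 3, 6],    # first differences: 2, 3 -> mean 2.5
                    [2, 2, 2]])   # first differences: 0, 0 -> mean 0.0
assert (diff_mean(windows) == np.array([2.5, 0.0])).all()
```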
3,925
def noiseFraction(truth_h5, measured_h5, tolerance):
    """
    Return the fraction of measured localizations that are greater than
    tolerance pixels from the nearest truth localization.

    Note: This will return 0 if there are no measured localizations.

    truth_h5 - A saH5Py.SAH5Py object with the ground truth localizations.
    measured_h5 - A saH5Py.SAH5Py object with the found localizations.
    tolerance - The search radius in pixels.
    """
    if (measured_h5.getNLocalizations() == 0):
        return [0, truth_h5.getNLocalizations()]

    noise_locs = 0
    total_locs = 0
    for i in range(truth_h5.getMovieLength()):
        t_locs = truth_h5.getLocalizationsInFrame(i)
        m_locs = measured_h5.getLocalizationsInFrame(i)

        if bool(t_locs) and bool(m_locs):
            dist = iaUtilsC.peakToPeakDistAndIndex(t_locs['x'], t_locs['y'],
                                                   m_locs['x'], m_locs['y'],
                                                   max_distance=tolerance)[0]

            noise_locs += numpy.count_nonzero((dist < 0.0))
            total_locs += dist.size
        elif bool(t_locs):
            total_locs += t_locs['x'].size

    return [noise_locs, total_locs]
3,926
def iter_sliding_window(T, embedding): """ Use a sliding window approach to iteratively transport the embedding from one region of the Chimera graph to another. Example: >>> import embera >>> import networkx as nx >>> import dwave_networkx as dnx >>> import matplotlib.pyplot as plt >>> S = nx.complete_graph(11) >>> T = dnx.chimera_graph(7) >>> embedding = minorminer.find_embedding(S,T) >>> dnx.draw_chimera_embedding(T,embedding,node_size=10) >>> slide = embera.transform.embedding.sliding_window(T,embedding) >>> for new_embedding in slide: ... dnx.draw_chimera_embedding(T,new_embedding,node_size=10) ... plt.pause(0.2) """ tiling = DWaveNetworkXTiling(T) shape = np.array(tiling.shape) # Initialize edges origin = shape end = (0,)*len(origin) # Find edges for v,chain in embedding.items(): for q in chain: tile = np.array(tiling.get_tile(q)) origin = [min(t,o) for t,o in zip(tile,origin)] end = [max(t,e) for t,e in zip(tile,end)] # Move tiles to origin and translate to try and find valid embedding size = np.array(end) - np.array(origin) interactions = lambda u,v,E:((s,t) for s in E[u] for t in E[v]) is_connected = lambda edges: any(T.has_edge(s,t) for s,t in edges) for x in range(shape[1]-size[1]): for y in range(shape[0]-size[0]): slide = {} offset = np.array([x,y]) # Translate all qubits for v,chain in embedding.items(): new_chain = [] for q in chain: tile = np.array(tiling.get_tile(q)) new_tile = tuple(tile - np.array(origin) + offset) new_q = tiling.set_tile(q,new_tile) new_chain.append(new_q) slide[v] = new_chain yield slide
3,927
def prepare(compute: dict, script_id: str):
    """Prepare the script

    :param compute: The instance to be attacked.
    :param script_id: The script's filename without the filename ending.
                      Is named after the activity name.
    :return: A tuple of the Command Id and the script content
    """
    os_type = __get_os_type(compute)
    if os_type == OS_LINUX:
        command_id = 'RunShellScript'
        script_name = "{}.sh".format(script_id)
    else:
        if script_id in UNSUPPORTED_WINDOWS_SCRIPTS:
            raise InterruptExecution("'{}' is not supported for os '{}'"
                                     .format(script_id, OS_WINDOWS))
        command_id = 'RunPowerShellScript'
        script_name = "{}.ps1".format(script_id)

    file_path = os.path.join(os.path.dirname(__file__), "../scripts", script_name)
    with open(file_path) as file_path:
        script_content = file_path.read()
    return command_id, script_content
3,928
def compute_rigid_flow(depth, pose, intrinsics, reverse_pose=False): """Compute the rigid flow from target image plane to source image Args: depth: depth map of the target image [batch, height_t, width_t] pose: target to source (or source to target if reverse_pose=True) camera transformation matrix [batch, 6], in the order of tx, ty, tz, rx, ry, rz; intrinsics: camera intrinsics [batch, 3, 3] Returns: Rigid flow from target image to source image [batch, height_t, width_t, 2] """ with tf.variable_scope('compute_rigid_flow'): batch, height, width = depth.get_shape().as_list() # Convert pose vector to matrix pose = pose_vec2mat(pose) if reverse_pose: pose = tf.matrix_inverse(pose) # Construct pixel grid coordinates pixel_coords = meshgrid(batch, height, width) tgt_pixel_coords = tf.transpose(pixel_coords[:,:2,:,:], [0, 2, 3, 1]) # Convert pixel coordinates to the camera frame cam_coords = pixel2cam(depth, pixel_coords, intrinsics) # Construct a 4x4 intrinsic matrix filler = tf.constant([0.0, 0.0, 0.0, 1.0], shape=[1, 1, 4]) filler = tf.tile(filler, [batch, 1, 1]) intrinsics = tf.concat([intrinsics, tf.zeros([batch, 3, 1])], axis=2) intrinsics = tf.concat([intrinsics, filler], axis=1) # Get a 4x4 transformation matrix from 'target' camera frame to 'source' # pixel frame. proj_tgt_cam_to_src_pixel = tf.matmul(intrinsics, pose) src_pixel_coords = cam2pixel(cam_coords, proj_tgt_cam_to_src_pixel) rigid_flow = src_pixel_coords - tgt_pixel_coords return rigid_flow
3,929
def get_versions(script_name):
    """Return all versions available for the script with the given name."""
    versions = repository.get(script_name, None)
    if not versions:
        return None
    return sorted(versions, reverse=True)
3,930
def data_static(filename):
    """
    Get files

    :param filename:
    :return:
    """
    _p, _f = os.path.split(filename)
    print(_p, _f)
    return flask.send_from_directory(
        os.path.join('/Users/dmitryduev/_caltech/python/deep-asteroids/data-raw/', _p), _f)
3,931
def test_62_response():
    """
    VEN, EiEvent Service, oadrDistributeEvent, oadrCreatedEvent Payload

    The VEN must process EVERY oadrEvent event message (new, modified,
    cancelled, etc.) that it receives from the VTN in an oadrDistributeEvent
    payload and it MUST reply with a createdEvent message for every EIEvent
    message in which the responseRequired is set to always. Furthermore if
    the responseRequired is set to never, the VEN MUST NOT respond with a
    createdEvent message. It is at the complete discretion of the VTN as to
    whether responses are required from the VEN.

    Note that this rule is universal and applies to all scenarios including
    the following:

    The event is one in which the VEN is already aware.
    The event is being cancelled and the VEN did not even know it existed.
    It does not matter how the EIEvent payloads were delivered, i.e. PUSH,
    PULL or as the result of being delivered in an ALL payload.
    """
    assert False
3,932
def get_key():
    """
    Gets the private key used to access Transcriptic's services.

    Returns
    -------
    str
    """
    if TRANSCRIPTIC_KEY is not None:
        return TRANSCRIPTIC_KEY
    return os.environ['TRANSCRIPTIC_KEY']
3,933
def linear_search_while(lst: list, value: Any) -> int:
    """Return the index of the first occurrence of value in lst, or return
    -1 if value is not in lst.

    >>> linear_search([2, 5, 1, -3], 5)
    1
    >>> linear_search([2, 4, 2], 2)
    0
    >>> linear_search([2, 5, 1, -3], 4)
    -1
    >>> linear_search([], 5)
    -1
    """
    i = 0  # The index of the next item in lst to examine.

    # Keep going until we reach the end of lst or until we find value.
    while i != len(lst) and lst[i] != value:
        i = i + 1

    # If we fell off the end of the list, we didn't find value.
    if i == len(lst):
        return -1
    else:
        return i
3,934
def get_pagerduty_secret_name():
    """
    Get name of the PagerDuty secret for currently used addon.

    Returns:
        string: name of the secret
    """
    return config.DEPLOYMENT["addon_name"] + constants.MANAGED_PAGERDUTY_SECRET_SUFFIX
3,935
def get_dummies(
    data: pandas.core.series.Series,
    prefix: Literal["X"],
    prefix_sep: Literal["-"],
    dummy_na: bool,
    columns: None,
    sparse: bool,
    drop_first: bool,
    dtype: Type[numpy.uint8],
):
    """
    usage.dask: 2
    """
    ...
3,936
def check_docs( doc_path: str, recurse: bool = True, max_threads: int = 10, delay: float = 0 ) -> Dict[str, Dict[str, UrlResult]]: """ Check multiple HTML files in `doc_path`. Parameters ---------- doc_path : str Path recurse: bool If True, recurse subfolders, default is True max_threads: int, optional The maximum number of async threads to run delay: float, optional Seconds delay between requests Returns ------- Dict[str, Dict[str, UrlResult]] Dictionary of pages checked. Results for each page is a dictionary of checked links for the page. """ page_results: Dict[str, Dict[str, UrlResult]] = defaultdict(dict) link_results: Dict[str, UrlResult] = {} links_to_check = _get_links_from_files(doc_path, recurse) print(f"Checking links {len(links_to_check)}...") checked_links = check_uris(links_to_check, max_threads, delay) print("\ndone") for result in checked_links: link_results[result.url] = result src_pages = links_to_check[result.url] for src_page in src_pages: page_results[src_page][result.url] = result _print_url_results(page_results) return page_results
3,937
def get_cmd():
    """Return a Collection instance for commands collection/table"""
    raise NotImplementedError()
3,938
def create_graph(filepath, nodes_data, legend=False): """Visualizes the energy system as graph. Creates, using the library Graphviz, a graph containing all components and connections from "nodes_data" and returns this as a PNG file. ---- Keyword arguments: filepath : obj:'str' -- path, where the PNG-result shall be saved nodes_data : obj:'dict' -- dictionary containing data from excel scenario file. legend : obj:'bool' -- specifies, whether a legend will be added to the graph or not ---- @ Christian Klemm - [email protected], 14.04.2020 """ def linebreaks(text): """Adds linebreaks a given string. Function which adds a line break to strings every ten characters. Up to four strings are added. ---- Keyword arguments: text : obj:'str' -- string to which line breaks will be added ---- @ Christian Klemm - [email protected], 14.04.2020 """ text_length = len(text) if text_length > 10: text = str(text[0:9] + "-\n" + text[9:]) if text_length > 20: text = str(text[0:21] + "-\n" + text[21:]) if text_length > 30: text = str(text[0:33] + "-\n" + text[33:]) if text_length > 40: text = str(text[0:45] + "-\n" + text[45:]) return text # Defines the location of Graphviz as path necessary for windows os.environ["PATH"] += \ os.pathsep + 'C:\\Program Files (x86)\\Graphviz2.38\\bin' # Creates the Directed-Graph dot = Digraph(format='png') # Creates a Legend if Legend = True if legend: component = ['Bus', 'Source', 'Sink', 'Transformer\nLinks', 'Storage'] shape = {'Bus': ['ellipse'], 'Source': ['trapezium'], 'Sink': ['invtrapezium'], 'Transformer\nLinks': ['box'], 'Storage': ['box']} for i in component: dot.node(i, shape=shape[i][0], fontsize="10", fixedsize='shape', width='1.1', height='0.6', style='dashed' if i == 'Storage' else '') components = ["buses", "sources", "demand", "transformers", "storages", "links"] shapes = {'sources': ['trapezium'], 'demand': ['invtrapezium'], 'transformers': ['box'], 'storages': ['box'], 'links': ['box']} bus = {'buses': ['label'], 'sources': ['output'], 'demand': ['input'], 'transformers': ['input'], 'storages': ['bus'], 'links': ['bus_1']} for i in components: for j, b in nodes_data[i].iterrows(): if b['active']: # sets component label label = b['label'] if i == 'buses': if b['shortage']: label = b['label'] + '_shortage' elif b['excess']: label = b['label'] + '_excess' label = linebreaks(label) if i != 'buses': dot.node(label, shape=shapes[i][0], fontsize="10", fixedsize='shape', width='1.1', height='0.6', style='dashed' if i == 'storages' else '') else: if b['shortage']: dot.node(label, shape='trapezium', fontsize="10", fixedsize='shape', width='1.1', height='0.6') if b['excess'] and not b['shortage']: dot.node(label, shape='invtrapezium', fontsize="10", fixedsize='shape', width='1.1', height='0.6') # creates bus nodes dot.node(b[bus[i][0]], shape='ellipse', fontsize="10") if i == 'links': dot.node(b['bus_2'], shape='ellipse') # creates edges if i == 'demand' or i == 'storages' or i == 'links' \ or (i == 'buses' and b['excess'] and not b['shortage']): dot.edge(b[bus[i][0]], label) if i == 'sources' or i == 'storages' \ or (i == 'buses' and b['shortage']): dot.edge(label, b[bus[i][0]]) if i == 'links': dot.edge(label, b['bus_2']) if b['(un)directed'] == 'undirected': dot.edge(b['bus_2'], label) dot.edge(label, b['bus_1']) elif i == 'transformers': dot.node(b['output'], shape='ellipse', fontsize="10") dot.edge(b[bus[i][0]], label) dot.edge(label, b['output']) if b['output2'] != "None": dot.node(b['output2'], shape='ellipse', fontsize="10") dot.edge(label, b['output2']) if 
b['transformer type'] == "HeatPump": # adds "_low_temp_source" to the label low_temp_source = label + '_low_temp_source' # Linebreaks, so that the labels fit the boxes low_temp_source = linebreaks(low_temp_source) # Adds a second input and a heat source (node and edge) # for heat pumps dot.node(label + '_low_temp_bus', shape='ellipse', fontsize="10") dot.edge(label + '_low_temp_bus', label) dot.node(low_temp_source, shape='trapezium', fontsize="10", fixedsize='shape', width='1.1', height='0.6') dot.edge(low_temp_source, label + '_low_temp_bus') elif i == 'buses': if b['excess'] and b['shortage']: label = b['label'] + '_excess' label = linebreaks(label) dot.node(label, shape='invtrapezium', fontsize="10", fixedsize='shape', width='1.1', height='0.6') dot.node(b[bus[i][0]], shape='ellipse', fontsize="10") dot.edge(b[bus[i][0]], label) dot.render(filepath + '/graph.gv', view=True)
3,939
def exec_cmd(cmd, secrets=None, timeout=600, ignore_error=False, **kwargs): """ Run an arbitrary command locally Args: cmd (str): command to run secrets (list): A list of secrets to be masked with asterisks This kwarg is popped in order to not interfere with subprocess.run(``**kwargs``) timeout (int): Timeout for the command, defaults to 600 seconds. ignore_error (bool): True if ignore non zero return code and do not raise the exception. Raises: CommandFailed: In case the command execution fails Returns: (CompletedProcess) A CompletedProcess object of the command that was executed CompletedProcess attributes: args: The list or str args passed to run(). returncode (str): The exit code of the process, negative for signals. stdout (str): The standard output (None if not captured). stderr (str): The standard error (None if not captured). """ masked_cmd = mask_secrets(cmd, secrets) log.info(f"Executing command: {masked_cmd}") if isinstance(cmd, str): cmd = shlex.split(cmd) completed_process = subprocess.run( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, timeout=timeout, **kwargs, ) masked_stdout = mask_secrets(completed_process.stdout.decode(), secrets) if len(completed_process.stdout) > 0: log.debug(f"Command stdout: {masked_stdout}") else: log.debug("Command stdout is empty") masked_stderr = mask_secrets(completed_process.stderr.decode(), secrets) if len(completed_process.stderr) > 0: log.warning(f"Command stderr: {masked_stderr}") else: log.debug("Command stderr is empty") log.debug(f"Command return code: {completed_process.returncode}") if completed_process.returncode and not ignore_error: raise CommandFailed( f"Error during execution of command: {masked_cmd}." f"\nError is {masked_stderr}" ) return completed_process
3,940
def timeParser(dstr):
    """ parse clock time string into array """
    hh, mm, ss = dstr.split(':')
    return np.array([hh, mm, ss]).astype(int)
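Example of the expected round trip (assuming `numpy as np`):

```python
import numpy as np

assert (timeParser("12:34:56") == np.array([12, 34, 56])).all()
```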
3,941
def get_43_ai_core_data(input_file=None): """Function for getting datas from aicore: ov/cnt/total_cyc/ov_cyc/pmu_cnt/stream_id.""" result_data = [] with open(input_file, 'rb') as ai_core_file: while True: line_ = ai_core_file.read(128) if line_: if not line_.strip(): continue else: break format_ = "BBHHHIIqqqqqqqqqqIIIIIIII" result_ = [hex(i) for i in struct.unpack(format_, line_)] byte01 = bin(int(result_[0].replace('0x', ''), 16)).replace('0b', '').zfill(8) ov = byte01[-4] cnt = byte01[0:4] total_cyc = int(result_[7].replace('0x', ''), 16) ov_cyc = int(result_[8].replace('0x', ''), 16) pmu_cnt = tuple(int(i.replace('0x', ''), 16) for i in result_[9:17]) stream_id = int(result_[17].replace('0x', ''), 16) result_data.append((ov, cnt, total_cyc, ov_cyc, stream_id, pmu_cnt)) return result_data
3,942
def get_configuration_class_with_attributes(
    klass: Type[AlgorithmConfiguration],
) -> Type[AlgorithmConfiguration]:
    """Get AlgorithmConfiguration with set attributes.

    Args:
        klass: a class to be used to extract attributes from.

    Returns:
        a class with the attributes set.
    """
    configuration_class = deepcopy(AlgorithmConfiguration)
    setattr(configuration_class, "algorithm_type", klass.algorithm_type)
    setattr(configuration_class, "algorithm_name", klass.algorithm_name)
    setattr(configuration_class, "algorithm_application", klass.__name__)
    setattr(configuration_class, "algorithm_version", klass.algorithm_version)
    return configuration_class
3,943
def test_insertToTable(connect, cursor, refer_table):
    """Test insert to table function"""
    result = insertToTable(connect, cursor, refer_table, ["TYPES"], ["file"])
    assert isinstance(result, object)
3,944
def main(): """ main fonksiyon; cerceve/iskelet/frame burada yaratiliyor """ reset_blend() props = bpy.context.scene.QueryProps height = props["height"] width = props["width"] depth = props["depth"] column_longer = props["column_longer"] row_longer = props["row_longer"] # height bpy.ops.mesh.primitive_cube_add() left_column = bpy.data.objects[0] left_column.name = "left_column" location(left_column, 0, -width/2) bpy.ops.mesh.primitive_cube_add() right_column = bpy.data.objects[0] right_column.name = "right_column" location(right_column, 0, width/2) # width bpy.ops.mesh.primitive_cube_add() top_column = bpy.data.objects[0] top_column.name = "top_column" location(top_column, 1, -height/2) bpy.ops.mesh.primitive_cube_add() bottom_column = bpy.data.objects[0] bottom_column.name = "Tbottom_column" location(bottom_column, 1, height/2) if column_longer: left_column.dimensions = 2, height+2, depth right_column.dimensions = 2, height+2, depth top_column.dimensions = width-1, 2, depth bottom_column.dimensions = width-1, 2, depth else: top_column.dimensions = width+2, 2, depth bottom_column.dimensions = width+2, 2, depth left_column.dimensions = 2, height-1, depth right_column.dimensions = 2, height-1, depth dims = width/48, height/24, depth/12 # top right bpy.ops.mesh.primitive_cube_add() plint_foot_1 = bpy.data.objects[0] plint_foot_1.name = "plint_foot_1" plint_foot_1.dimensions = dims print(plint_foot_1.dimensions.y) loc_y = - bottom_column.location.y - height/24/2 print(loc_y,- plint_foot_1.dimensions.y) plint_foot_1.location = right_column.location.x - right_column.location.x/6, loc_y, depth/4 # bottom right bpy.ops.mesh.primitive_cube_add() plint_foot_2 = bpy.data.objects[0] plint_foot_2.name = "plint_foot_2" plint_foot_2.location = right_column.location.x - right_column.location.x/6, loc_y, -depth/4 plint_foot_2.dimensions = dims # bottom left bpy.ops.mesh.primitive_cube_add() plint_foot_3 = bpy.data.objects[0] plint_foot_3.name = "plint_foot_3" plint_foot_3.location = left_column.location.x - left_column.location.x/6, loc_y, -depth/4 plint_foot_3.dimensions = dims # top left bpy.ops.mesh.primitive_cube_add() plint_foot_4 = bpy.data.objects[0] plint_foot_4.name = "plint_foot_4" plint_foot_4.location = left_column.location.x - left_column.location.x/6, loc_y, depth/4 plint_foot_4.dimensions = dims bpy.ops.mesh.primitive_cube_add() double_door = bpy.data.objects[0] double_door.name = "double_door" location(double_door, 2, -depth/2) double_door.dimensions = width, height, 1
3,945
def verify_kernel_cmdline(kernel_cmdline_golden_file, scrutiny_out): """verify_kernel_cmdline verifies the kernel cmdline in ZBI image. Raises: VerificationError: If verification fails. """ gf_checker = GoldenFileChecker(kernel_cmdline_golden_file) actual_cmd = [] if os.path.exists(os.path.join(scrutiny_out, 'sections', 'cmdline.blk')): try: with open(os.path.join(scrutiny_out, 'sections', 'cmdline.blk'), 'r') as f: # The cmdline.blk contains a trailing \x00. cmdline = f.read().strip().rstrip('\x00') except IOError as e: raise VerificationError(f'Failed to read cmdline.blk: {e}') cmdline_args = cmdline.split(' ') try: actual_cmd = generate_sorted_cmdline(cmdline_args) except CmdlineFormatError as e: raise VerificationError(f'Invalid golden cmdline format: {e}') errors = gf_checker.check_match(actual_cmd) if len(errors) > 0: error_msgs = ['Kernel cmdline mismatch!'] error_msgs.append('') error_msgs.extend(errors) error_msgs.append('') error_msgs.append(f'If you intended to change the kernel command line, please acknowledge it by updating {kernel_cmdline_golden_file} with the added or removed lines.') error_msgs.append(SOFT_TRANSITION_MESSAGE) raise VerificationError('\n'.join(error_msgs))
3,946
def get_token_symbol(token_address: str):
    """
    Gets the token symbol

    If the score does not have an external `symbol` method to get the token
    symbol, this will raise JSONRPCException.
    """
    call = CallBuilder()\
        .from_(wallet.get_address())\
        .to(token_address)\
        .method("symbol")\
        .build()
    return icon_service.call(call)
3,947
def iceil(x):
    r"""
    Return the ceiling of the input, element-wise.

    The ceil of the scalar `x` is the smallest integer `i`, such that
    `i >= x`. It is often denoted as :math:`\lceil x \rceil`.

    Parameters
    ----------
    x : array_like
        Input data.

    Returns
    -------
    y : {numpy.ndarray, scalar}
        The ceiling of each element in `x`, with `int` dtype.
    """
    return np.ceil(x).astype(int)
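Behavior on scalars and arrays (assuming `numpy as np`):

```python
import numpy as np

assert iceil(2.1) == 3
assert (iceil(np.array([-1.5, 0.0, 2.7])) == np.array([-1, 0, 3])).all()
```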
3,948
def get_repo_version(filename: str, repo: str) -> Optional[str]:
    """Return the version (i.e., rev) of a repo

    Args:
        filename (str): .pre-commit-config.yaml
        repo (str): repo URL

    Returns:
        Optional[str]: the version of the repo
    """
    with open(filename, "r") as stream:
        pre_commit_data = yaml.safe_load(stream)
    pre_config_repo = next(
        (item for item in pre_commit_data["repos"] if item["repo"] == repo), None
    )
    if pre_config_repo:
        return pre_config_repo["rev"]
    return None
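A self-contained check against a throwaway config file; the repo URL and rev below are illustrative, not taken from a real project, and the function's own imports (`yaml`, `Optional`) are assumed to be present:

```python
import tempfile

config = (
    "repos:\n"
    "  - repo: https://github.com/psf/black\n"
    "    rev: 22.3.0\n"
    "    hooks:\n"
    "      - id: black\n"
)
with tempfile.NamedTemporaryFile("w", suffix=".yaml", delete=False) as tmp:
    tmp.write(config)

assert get_repo_version(tmp.name, "https://github.com/psf/black") == "22.3.0"
assert get_repo_version(tmp.name, "https://example.com/other") is None
```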
3,949
def write_readback(dic, data):
    """ Write out a NMRPipe file and read back in. """
    # write out and read back
    tf = tempfile.mktemp(dir=".")
    ng.pipe.write(tf, dic, data)
    rdic, rdata = ng.pipe.read(tf)
    os.remove(tf)
    assert_array_equal(data, rdata)
    assert dic == rdic
3,950
def save_environment():
    """Save a copy of environment."""
    global saved_env
    u.verbose(1, "saving copy of environment")
    saved_env = copy.deepcopy(os.environ)
3,951
def plot_grad_flow(named_parameters: Iterator[Tuple[str, torch.nn.Parameter]]) -> plt.Figure: """ Plots the gradients flowing through different layers in the net during training. Can be used for checking for possible gradient vanishing / exploding problems. Usage: Plug this function in Trainer class after loss.backwards() as "plot_grad_flow(self.model.named_parameters())" to visualize the gradient flow """ ave_grads = [] max_grads = [] layers = [] for n, p in named_parameters: if p.requires_grad and ("bias" not in n): layers.append(n.replace('.weight', '')) ave_grads.append(p.grad.abs().mean()) max_grads.append(p.grad.abs().max()) fig, ax = plt.subplots() ax.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color="c") ax.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color="b") ax.hlines(0, 0, len(ave_grads) + 1, lw=2, color="k") ax.set_xticks(range(0, len(ave_grads), 1)) ax.set_xticklabels(layers, rotation=45) ax.set_xlim(left=0, right=len(ave_grads)) ax.set_ylim(bottom=-0.001, top=0.02) # zoom in on the lower gradient regions ax.set_xlabel("Layers") ax.set_ylabel("average gradient") ax.set_title("Gradient flow") ax.grid(True) ax.legend([Line2D([0], [0], color="c", lw=4), Line2D([0], [0], color="b", lw=4), Line2D([0], [0], color="k", lw=4)], ['max-gradient', 'mean-gradient', 'zero-gradient']) return fig
3,952
def mk_cli_context_settings(
    mk_db: CliCtxDbBase.MkFnT,
) -> Dict[str, Any]:
    """Create initial click context parameters for this cli application.

    This is currently used as input for autocompletion.

    Example:
        `@click.group(context_settings=mk_cli_context_settings())`

    See `init_cli_ctx` which depends on this.
    """
    obj_d = mk_cli_db_obj_d(mk_db)
    return dict(
        obj=obj_d,
        # It is also possible to customize cli default values from here.
        # <https://click.palletsprojects.com/en/7.x/commands/#overriding-defaults>
        # default_map
    )
3,953
def get_file_action(header: 'dict[str,str]') -> str:
    """Gets action file from main repo

    Args:
        header (dict[str,str]): Header with auth token

    Raises:
        get_aciton_file_e: Raised when no action file was collected

    Returns:
        str: The content of the action file
    """
    response = requests.get(
        "https://api.github.com/repos/vovsike/ImageBuilderAPIScript/contents/action_raw.yaml",
        headers=header)
    try:
        response.raise_for_status()
    except HTTPError as get_aciton_file_e:
        print("Error getting action file")
        raise get_aciton_file_e
    content = ast.literal_eval(response.content.decode("utf-8")).get("content")
    return content
3,954
def sensor(raw_input_shape: StandardizedTensorShape, f: SensorFunction = None, sensor_id: str = None, history: int = None) \ -> Union[Callable[[SensorFunction], SensorLambda], SensorLambda]: """Decorator for creating sensors from functions. Usage: @sensor((5, 8)) def my_sensor(env, frame): sensor_reading = np.random.uniform(0, 1, (5, 8)) return sensor_reading kernel.add_module(my_sensor) """ if f is None: kwargs = {} if sensor_id is not None: kwargs.update(sensor_id=sensor_id) return partial(sensor, raw_input_shape, **kwargs) if sensor_id is None: sensor_id = get_default_sensor_id(f) if sensor_id in _SENSOR_MAP: sensor_obj = _SENSOR_MAP[sensor_id] if isinstance(sensor_obj, SensorHistory): wrapped = sensor_obj.wrapped else: wrapped = sensor_obj if wrapped.f != f or wrapped.raw_input_shape != raw_input_shape: warnings.warn("Redefining sensor %s with function %s and shape %s.\n" "Original function: %s\nOriginal shape: %s" % (sensor_id, f, raw_input_shape, wrapped.f, wrapped.raw_input_shape)) else: return sensor_obj sensor_obj = wraps(f)(SensorLambda(sensor_id, raw_input_shape, f)) if history is not None: sensor_obj = SensorHistory(sensor_obj, history) _SENSOR_MAP[sensor_id] = sensor_obj return sensor_obj
3,955
def convert_event(ui_event): """Converts ui.event into ecs.event This maps keyboard entries into something that the system can handle TODO: Add a movement system """ if isinstance(ui_event, KeyboardEvent): vim_movement_mapper = { # Cardinal KeyboardEvent("h"): vector.LEFT, KeyboardEvent("j"): vector.DOWN, KeyboardEvent("k"): vector.UP, KeyboardEvent("l"): vector.RIGHT, # Diagonals KeyboardEvent("y"): vector.UP_LEFT, KeyboardEvent("u"): vector.UP_RIGHT, KeyboardEvent("b"): vector.DOWN_LEFT, KeyboardEvent("n"): vector.DOWN_RIGHT, # No movement KeyboardEvent("."): vector.NONE, } movement = vim_movement_mapper.get(ui_event) if movement: ecs_event = Event("MOVE", settings.player, movement) return ecs_event if ui_event == KeyboardEvent('return', 13, meta=True): tdl.console_set_fullscreen(not tdl.console_is_fullscreen()) return None if ui_event == KeyboardEvent("escape"): exit(0)
3,956
def collection(collection, _pod=None):
    """Retrieves a collection from the pod."""
    return _pod.get_collection(collection)
3,957
def appif(cfg):
    """
    Return interface belonging to application
    """
    return get_interface_of_network(appnet(cfg)['name'])
3,958
def f(x):
    """
    Try and have the NN approximate the xor function.
    """
    if x[0] == x[1]:
        return 0.
    else:
        return 1.
3,959
def dataframe_to_list(df: pandas.DataFrame) -> list:
    """
    Use caution with datetime columns, as they may not be de/serialized as desired
    """
    return json.loads(df.to_json(orient="records"))
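Example of the records orientation, assuming `json` and `pandas` are imported at module level as the body and signature require:

```python
import json
import pandas

df = pandas.DataFrame({"name": ["a", "b"], "value": [1, 2]})
assert dataframe_to_list(df) == [{"name": "a", "value": 1},
                                 {"name": "b", "value": 2}]
```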
3,960
def decimal_to_binary(integer,nbits=8,grouped=0): """Converts integer to binary string of length nbits, sign bit and then m.s.b. on the left. Negative numbers are twos-complements, i.e., bitwise complement + 1.""" # Just remember that minus sign and ignore it if integer < 0: negative = True integer = abs(integer+1) else: negative = False # build up the strin result = '' # part of number left to process remaining_integer = integer while (remaining_integer > 0) & (nbits > 0): lsb = remaining_integer % 2 if negative: lsb = 1-lsb result = ''.join((str(lsb),result)) remaining_integer = remaining_integer >> 1 nbits -= 1 while nbits > 0: if negative: result = ''.join(('1',result)) else: result = ''.join(('0',result)) nbits -= 1 if grouped: temp = result result = "" for bit in range(len(temp)): if bit and (bit % grouped) == 0: result += ' ' result += temp[bit] return result
3,961
def line_integrals(state, uloc, vloc, kind="same"): """ calculate line integrals along all islands Arguments: kind: 'same' calculates only line integral contributions of an island with itself, while 'full' calculates all possible pairings between all islands. """ vs = state.variables nisle = state.dimensions["isle"] ipx, ipy = runtime_state.proc_idx if ipx == 0: i = slice(1, -2) ip1 = slice(2, -1) else: i = slice(2, -2) ip1 = slice(3, -1) if ipy == 0: j = slice(1, -2) jp1 = slice(2, -1) else: j = slice(2, -2) jp1 = slice(3, -1) east = ( vloc[i, j, :] * vs.dyu[npx.newaxis, j, npx.newaxis] + uloc[i, jp1, :] * vs.dxu[i, npx.newaxis, npx.newaxis] * vs.cost[npx.newaxis, jp1, npx.newaxis] ) west = ( -vloc[ip1, j, :] * vs.dyu[npx.newaxis, j, npx.newaxis] - uloc[i, j, :] * vs.dxu[i, npx.newaxis, npx.newaxis] * vs.cost[npx.newaxis, j, npx.newaxis] ) north = ( vloc[i, j, :] * vs.dyu[npx.newaxis, j, npx.newaxis] - uloc[i, j, :] * vs.dxu[i, npx.newaxis, npx.newaxis] * vs.cost[npx.newaxis, j, npx.newaxis] ) south = ( -vloc[ip1, j, :] * vs.dyu[npx.newaxis, j, npx.newaxis] + uloc[i, jp1, :] * vs.dxu[i, npx.newaxis, npx.newaxis] * vs.cost[npx.newaxis, jp1, npx.newaxis] ) if kind == "same": east = npx.sum(east * vs.line_dir_east_mask[i, j], axis=(0, 1)) west = npx.sum(west * vs.line_dir_west_mask[i, j], axis=(0, 1)) north = npx.sum(north * vs.line_dir_north_mask[i, j], axis=(0, 1)) south = npx.sum(south * vs.line_dir_south_mask[i, j], axis=(0, 1)) return global_sum(east + west + north + south) elif kind == "full": isle_int = npx.empty((nisle, nisle)) def loop_body(isle, isle_int): east_isle = npx.sum( east[..., isle, npx.newaxis] * vs.line_dir_east_mask[i, j], axis=(0, 1), ) west_isle = npx.sum( west[..., isle, npx.newaxis] * vs.line_dir_west_mask[i, j], axis=(0, 1), ) north_isle = npx.sum( north[..., isle, npx.newaxis] * vs.line_dir_north_mask[i, j], axis=(0, 1), ) south_isle = npx.sum( south[..., isle, npx.newaxis] * vs.line_dir_south_mask[i, j], axis=(0, 1), ) isle_int = update(isle_int, at[:, isle], east_isle + west_isle + north_isle + south_isle) return isle_int isle_int = for_loop(0, nisle, loop_body, isle_int) return global_sum(isle_int) else: raise ValueError('"kind" argument must be "same" or "full"')
3,962
def _BBANDS(kwargs):
    """Bollinger Bands.

    Technical parameters
    --------------------
    Uses a 21-day period and 2 standard deviations.
    """
    df = kwargs.get('df')
    limit_start = kwargs.get('limit_start')
    limit_end = kwargs.get('limit_end')
    ndays = 21
    inds = indicators(
        'BBANDS', df, timeperiod=ndays).loc[limit_start:limit_end, :]
    traces = []
    for c in inds.columns:
        name = 'price_{}_{}'.format(c, ndays)
        trace = go.Scatter(
            x=np.arange(inds.shape[0]),
            y=inds[c],
            name=name,
        )
        traces.append(trace)
    return traces
3,963
def test_joint_refinement(dials_regression, run_in_tmpdir): """A basic test of joint refinement of the CS-PAD detector at hierarchy level 2 with 300 crystals.""" from dials.array_family import flex bevington = pytest.importorskip("scitbx.examples.bevington") if not hasattr(bevington, "non_linear_ls_eigen_wrapper"): pytest.skip("Skipping test as SparseLevMar engine not available") data_dir = os.path.join(dials_regression, "refinement_test_data", "xfel_metrology") # Do refinement and load the history result = procrunner.run( [ "dials.refine", os.path.join(data_dir, "benchmark_level2d.json"), os.path.join(data_dir, "benchmark_level2d.pickle"), os.path.join(data_dir, "refine.phil"), "history=history.json", ] ) assert not result.returncode and not result.stderr # there are plenty of things we could do with the refinement history, but # here just check that final RMSDs are low enough history = Journal.from_json_file("history.json") final_rmsd = history["rmsd"][-1] assert final_rmsd[0] < 0.0354 assert final_rmsd[1] < 0.0406 assert final_rmsd[2] < 0.0018 # also check that the used_in_refinement flag got set correctly rt = flex.reflection_table.from_file("refined.refl") uir = rt.get_flags(rt.flags.used_in_refinement) assert uir.count(True) == history["num_reflections"][-1]
3,964
def make_colors(color: OpColor, fill_color: OpColor,
                colors: Optional[Iterable[OpColor]]) -> Tuple[OpColor, ...]:
    """Creates final colors tuple."""
    if colors is None:
        return conform_color(color), conform_color(fill_color), *DEFAULT_COLORS[2:]
    colors = [conform_color(c) for c, _ in zip(colors, range(len(DEFAULT_COLORS)))]
    colors.extend(DEFAULT_COLORS[len(colors):])
    return tuple(colors)
3,965
def print_qa(questions, answers_gt, answers_gt_original, answers_pred, era, similarity=dirac, path=''): """ In: questions - list of questions answers_gt - list of answers (after modifications like truncation) answers_gt_original - list of answers (before modifications) answers_pred - list of predicted answers era - current era similarity - measure that measures similarity between gt_original and prediction; by default dirac measure path - path for the output (if empty then stdout is used) by fedault an empty path Out: the similarity score """ if len(questions) != len(answers_gt): raise AssertionError('Diferent questions and answers_gt lengths.') if len(questions) != len(answers_pred): raise AssertionError('Diferent questions and answers_pred lengths.') output = ['-' * 50, 'Era {0}'.format(era)] score = 0.0 for k, q in list(enumerate(questions)): a_gt = answers_gt[k] a_gt_original = answers_gt_original[k] a_p = answers_pred[k] score += dirac(a_p, a_gt_original) if isinstance(q[0], unicode_fn): tmp = unicode_fn('question: {0}\nanswer: {1}\nanswer_original: {2}\nprediction: {3}\n') else: tmp = 'question: {0}\nanswer: {1}\nanswer_original: {2}\nprediction: {3}\n' output.append(tmp.format(q, a_gt, a_gt_original, a_p)) score = (score / len(questions)) * 100.0 output.append('Score: {0}'.format(score)) if path == '': print('%s' % '\n'.join(map(str, output))) else: list2file(path, output) return score
3,966
def recast_to_supercell(z, z_min, z_max):
    """Gets the position of the particle at ``z`` within the simulation
    supercell with boundaries ``z_min`` and ``z_max``. If the particle is
    outside the supercell, it returns the position of its closest image.

    :param z:
    :param z_min:
    :param z_max:
    :return:
    """
    sc_size = (z_max - z_min)
    return z_min + (z - z_min) % sc_size
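Worked examples of the wrap-around; the supercell here spans [0, 10):

```python
assert recast_to_supercell(3.0, 0.0, 10.0) == 3.0   # already inside
assert recast_to_supercell(12.5, 0.0, 10.0) == 2.5  # image from the right
assert recast_to_supercell(-1.0, 0.0, 10.0) == 9.0  # image from the left
```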
3,967
def set_transactions(): """ Save to database all the transactions. """ print 'Updating transactions =>', last_transaction = db.simple_query('SELECT MAX(date) FROM transactions')[0][0] if last_transaction: until_date = last_transaction - timedelta(days=10) else: until_date = date.today() - timedelta(days=10) news = com.get_news(until_date) for new in news: ndate, title, text = new if 'Fichajes' not in title: continue pattern = re.compile( ur'(?:(?:\\n)?([(\S+ )]+?)(?: cambia por )([0-9\.,]*?)(?: .*? de )(.+?) a (.+?)\.)', re.UNICODE) transactions = re.findall(pattern, text) for trans in transactions: playername, value, fr, to = trans value = int(value.replace('.', '')) playername = playername.strip() try: player_id = db.simple_query('SELECT idp FROM players WHERE name LIKE "%%%s%%"' % playername)[0][0] if 'Computer' in fr: kind = 'Buy' user_id = db.simple_query('SELECT idu FROM users WHERE name LIKE "%%%s%%"' % to)[0][0] db.commit_query( 'INSERT IGNORE INTO transactions (idp, idu, type, price, date) VALUES (%s,%s,"%s",%s,"%s")' % (player_id, user_id, kind, value, ndate)) elif 'Computer' in to: kind = 'Sell' user_id = db.simple_query('SELECT idu FROM users WHERE name LIKE "%%%s%%"' % fr)[0][0] db.commit_query( 'INSERT IGNORE INTO transactions (idp, idu, type, price, date) VALUES (%s,%s,"%s",%s,"%s")' % (player_id, user_id, kind, value, ndate)) else: kind = 'Buy' user_id = db.simple_query('SELECT idu FROM users WHERE name LIKE "%%%s%%"' % to)[0][0] db.commit_query( 'INSERT IGNORE INTO transactions (idp, idu, type, price, date) VALUES (%s,%s,"%s",%s,"%s")' % (player_id, user_id, kind, value, ndate)) user_id = db.simple_query('SELECT idu FROM users WHERE name LIKE "%%%s%%"' % fr)[0][0] kind = 'Sell' db.commit_query( 'INSERT IGNORE INTO transactions (idp, idu, type, price, date) VALUES (%s,%s,"%s",%s,"%s")' % (player_id, user_id, kind, value, ndate)) except IndexError: # Player selled before having in database pass print '%sdone%s.' % (GREEN, ENDC)
3,968
def test_vacuum_hive_table(params, calls):
    """While writing create a hive table with and without ZOPTIMIZE."""
    # Arrange
    spark_session = Mock()

    # Act
    BatchDelta.vacuum(spark_session, "path/to/delta/files", **params)

    # Assert
    assert spark_session.sql.call_count == 1
    spark_session.sql.assert_has_calls(calls)
3,969
def lca_operation(locator):
    """
    Algorithm to calculate the primary energy and GHG_kgCO2MJ emissions of buildings according to the method used in
    the integrated model of [Fonseca-Schlueter-2015]_ and the performance factors of [ecobau.ch].

    :param locator: an InputLocator instance set to the scenario to work on
    :type locator: InputLocator

    The following file is created by this script:

    - total_LCA_operation: .csv
        csv file of yearly non-renewable primary energy demand and GHG_kgCO2MJ emissions per building for all energy
        services (i.e. heating, hot water, cooling, electricity) both total and per square meter

    :returns: This function does not return anything
    :rtype: NoneType

    .. [Fonseca-Schlueter-2015] J. Fonseca & A. Schlueter (2015) "Integrated model for characterization of
        spatiotemporal building energy consumption patterns in neighborhoods and city districts". Applied Energy 142.
    """

    # get local files
    ## get demand results for the scenario
    demand = pd.read_csv(locator.get_total_demand())
    ## get the supply systems for each building in the scenario
    supply_systems = gpdf.from_file(locator.get_building_supply()).drop('geometry', axis=1)
    ## get the non-renewable primary energy and greenhouse gas emissions factors for each supply system in the database
    data_all_in_one_systems = pd.read_excel(locator.get_database_supply_assemblies(), sheet_name=None)
    factors_heating = data_all_in_one_systems['HEATING']
    factors_dhw = data_all_in_one_systems['HOT_WATER']
    factors_cooling = data_all_in_one_systems['COOLING']
    factors_electricity = data_all_in_one_systems['ELECTRICITY']
    factors_resources = pd.read_excel(locator.get_database_feedstocks(), sheet_name=None)

    # get the mean of all values for this
    factors_resources_simple = [(name, values['GHG_kgCO2MJ'].mean()) for name, values in factors_resources.items()]

    # append NONE choice with zero values
    factors_resources_simple = pd.DataFrame(factors_resources_simple, columns=['code', 'GHG_kgCO2MJ']).append(
        {'code': 'NONE'}, ignore_index=True).fillna(0)

    # local variables
    Qhs_flag = Qww_flag = Qcs_flag = E_flag = True

    # calculate the total operational non-renewable primary energy demand and GHG_kgCO2MJ emissions
    ## create data frame for each type of end use energy containing the type of supply system use, the final energy
    ## demand and the primary energy and emissions factors for each corresponding type of supply system
    heating_factors = factors_heating.merge(factors_resources_simple, left_on='feedstock', right_on='code')[
        ['code_x', 'feedstock', 'GHG_kgCO2MJ']]
    cooling_factors = factors_cooling.merge(factors_resources_simple, left_on='feedstock', right_on='code')[
        ['code_x', 'feedstock', 'GHG_kgCO2MJ']]
    dhw_factors = factors_dhw.merge(factors_resources_simple, left_on='feedstock', right_on='code')[
        ['code_x', 'feedstock', 'GHG_kgCO2MJ']]
    electricity_factors = factors_electricity.merge(factors_resources_simple, left_on='feedstock', right_on='code')[
        ['code_x', 'feedstock', 'GHG_kgCO2MJ']]

    heating = supply_systems.merge(demand, on='Name').merge(heating_factors, left_on='type_hs', right_on='code_x')
    dhw = supply_systems.merge(demand, on='Name').merge(dhw_factors, left_on='type_dhw', right_on='code_x')
    cooling = supply_systems.merge(demand, on='Name').merge(cooling_factors, left_on='type_cs', right_on='code_x')
    electricity = supply_systems.merge(demand, on='Name').merge(electricity_factors, left_on='type_el',
                                                                right_on='code_x')

    ## calculate the operational primary energy and emissions for heating services
    heating_services = [(Qhs_flag, 'DH_hs_MWhyr', 'DH_hs', 'Af_m2'),
                        (Qhs_flag, 'SOLAR_hs_MWhyr', 'SOLAR_hs', 'Af_m2'),
                        (Qhs_flag, 'NG_hs_MWhyr', 'NG_hs', 'Af_m2'),
                        (Qhs_flag, 'COAL_hs_MWhyr', 'COAL_hs', 'Af_m2'),
                        (Qhs_flag, 'OIL_hs_MWhyr', 'OIL_hs', 'Af_m2'),
                        (Qhs_flag, 'WOOD_hs_MWhyr', 'WOOD_hs', 'Af_m2')]
    for x in heating_services:
        fields_to_plot = ['Name', 'GFA_m2', x[2] + '_tonCO2']
        # calculate the total (t GHG_kgCO2MJ-eq) and specific (kg GHG_kgCO2MJ-eq/m2) operational greenhouse gas
        # emissions (O_ghg_)
        heating[fields_to_plot[2]] = heating[x[1]] * heating['GHG_kgCO2MJ'] * 3.6

    ## calculate the operational primary energy and emissions for domestic hot water services
    dhw_services = [(Qww_flag, 'DH_ww_MWhyr', 'DH_ww'),
                    (Qww_flag, 'SOLAR_ww_MWhyr', 'SOLAR_ww'),
                    (Qww_flag, 'NG_ww_MWhyr', 'NG_ww'),
                    (Qww_flag, 'COAL_ww_MWhyr', 'COAL_ww'),
                    (Qww_flag, 'OIL_ww_MWhyr', 'OIL_ww'),
                    (Qww_flag, 'WOOD_ww_MWhyr', 'WOOD_ww')]
    for x in dhw_services:
        fields_to_plot = ['Name', 'GFA_m2', x[2] + '_tonCO2']
        # calculate the total (t GHG_kgCO2MJ-eq) and specific (kg GHG_kgCO2MJ-eq/m2) operational greenhouse gas
        # emissions (O_ghg_)
        dhw[fields_to_plot[2]] = dhw[x[1]] * dhw['GHG_kgCO2MJ'] * 3.6

    ## calculate the operational primary energy and emissions for cooling services
    cooling_services = [(Qcs_flag, 'DC_cs_MWhyr', 'DC_cs'),
                        (Qcs_flag, 'DC_cdata_MWhyr', 'DC_cdata'),
                        (Qcs_flag, 'DC_cre_MWhyr', 'DC_cre')]
    for x in cooling_services:
        fields_to_plot = ['Name', 'GFA_m2', x[2] + '_tonCO2']
        cooling[fields_to_plot[2]] = cooling[x[1]] * cooling['GHG_kgCO2MJ'] * 3.6

    ## calculate the operational primary energy and emissions for electrical services
    electrical_services = [(E_flag, 'GRID_MWhyr', 'GRID'),
                           (E_flag, 'PV_MWhyr', 'PV')]
    for x in electrical_services:
        fields_to_plot = ['Name', 'GFA_m2', x[2] + '_tonCO2']
        electricity[fields_to_plot[2]] = electricity[x[1]] * electricity['GHG_kgCO2MJ'] * 3.6

    # create a dataframe with the results for each energy service
    result = heating.merge(dhw, on='Name', suffixes=['_a', '_b']).merge(cooling, on='Name', suffixes=['a', '_b']).merge(
        electricity, on='Name')
    result.rename(columns={'GFA_m2_x': 'GFA_m2'}, inplace=True)

    # calculate the total operational non-renewable primary energy demand and emissions as a sum of the results for
    # each energy service used in the building
    result['GHG_sys_tonCO2'] = 0.0
    all_services = electrical_services + cooling_services + heating_services + dhw_services
    fields_to_plot = []
    for service in all_services:
        fields_to_plot += [service[2] + '_tonCO2']

    result['GHG_sys_district_scale_tonCO2'] = result['GRID_tonCO2'] + \
                                              result['DH_hs_tonCO2'] + \
                                              result['DH_ww_tonCO2'] + \
                                              result['DC_cdata_tonCO2'] + \
                                              result['DC_cs_tonCO2'] + \
                                              result['DC_cre_tonCO2']
    result['GHG_sys_building_scale_tonCO2'] = result['OIL_hs_tonCO2'] + \
                                              result['NG_hs_tonCO2'] + \
                                              result['WOOD_hs_tonCO2'] + \
                                              result['COAL_hs_tonCO2'] + \
                                              result['SOLAR_hs_tonCO2'] + \
                                              result['PV_tonCO2'] + \
                                              result['OIL_ww_tonCO2'] + \
                                              result['NG_ww_tonCO2'] + \
                                              result['WOOD_ww_tonCO2'] + \
                                              result['COAL_ww_tonCO2'] + \
                                              result['SOLAR_ww_tonCO2']
    result['GHG_sys_tonCO2'] = result['GHG_sys_building_scale_tonCO2'] + result['GHG_sys_district_scale_tonCO2']

    # export the total operational non-renewable energy demand and emissions for each building
    fields_to_plot = ['Name', 'GFA_m2', 'GHG_sys_tonCO2', 'GHG_sys_building_scale_tonCO2',
                      'GHG_sys_district_scale_tonCO2'] + fields_to_plot
    result[fields_to_plot].to_csv(locator.get_lca_operation(), index=False, float_format='%.2f', na_rep='nan')
3,970
def list_services(request):
    """
    Should probably move this to an Ajax JSON request like the probe.
    """
    if request.method == "POST":
        action = request.POST.get("action")
        sid = request.POST.get("id")
        logger.debug(f"-- action: {action} sid: {sid}")
        if action == "delete":
            logger.debug(f"-- deleting: {sid}")
            response = tycho.delete({"name": sid})
            sleep(2)
            logger.debug(f"-- delete response: status: {response}")
            return HttpResponseRedirect("/apps/")
3,971
def open_sat_data(
    zarr_path: Union[Path, str],
    convert_to_uint8: bool = True,
) -> xr.DataArray:
    """Lazily opens the Zarr store.

    Args:
      zarr_path: Cloud URL or local path pattern. If GCP URL, must start with 'gs://'
    """
    _log.debug("Opening satellite data: %s", zarr_path)

    # Silence the warning about large chunks.
    # Alternatively, we could set this to True, but that slows down loading a Satellite batch
    # from 8 seconds to 50 seconds!
    dask.config.set({"array.slicing.split_large_chunks": False})

    # Open the data
    dataset = xr.open_dataset(zarr_path, engine="zarr", chunks="auto")

    # Flip coordinates to top-left first
    dataset = dataset.reindex(y=dataset.y[::-1])
    dataset = dataset.reindex(x=dataset.x[::-1])

    # Rename
    # These renamings will no longer be necessary when the Zarr uses the 'correct' names,
    # see https://github.com/openclimatefix/Satip/issues/66
    if "variable" in dataset:
        dataset = dataset.rename({"variable": "channel"})
    elif "channel" not in dataset:
        # This is HRV version 3, which doesn't have a channels dim. So add one.
        dataset = dataset.expand_dims(dim={"channel": ["HRV"]}, axis=1)

    # Rename coords to be more explicit about exactly what some coordinates hold:
    # Note that `rename` renames *both* the coordinates and dimensions, and keeps
    # the connection between the dims and coordinates, so we don't have to manually
    # use `data_array.set_index()`.
    dataset = dataset.rename(
        {
            "time": "time_utc",
            "y": "y_geostationary",
            "x": "x_geostationary",
        }
    )

    data_array = dataset["data"]
    del dataset

    # Ensure the y and x coords are in the right order (top-left first):
    assert data_array.y_geostationary[0] > data_array.y_geostationary[-1]
    assert data_array.x_geostationary[0] < data_array.x_geostationary[-1]
    assert data_array.y_osgb[0, 0] > data_array.y_osgb[-1, 0]
    assert data_array.x_osgb[0, 0] < data_array.x_osgb[0, -1]

    if convert_to_uint8:
        data_array = data_array.clip(min=0, max=1023)
        data_array.data = (data_array.astype(np.float32).data / 4.0).round().astype(np.uint8)

    # Sanity checks!
    assert data_array.dims == ("time_utc", "channel", "y_geostationary", "x_geostationary")
    datetime_index = pd.DatetimeIndex(data_array.time_utc)
    assert datetime_index.is_unique
    assert datetime_index.is_monotonic_increasing
    # Satellite datetimes can sometimes be 04, 09, minutes past the hour, or other slight offsets.
    # These slight offsets will break downstream code, which expects satellite data to be at
    # exactly 5 minutes past the hour.
    assert (datetime_index == datetime_index.round("5T")).all()

    return data_array
3,972
async def get_pedigree(
    internal_family_ids: List[int] = Query(None),
    response_type: ContentType = ContentType.JSON,
    replace_with_participant_external_ids: bool = True,
    replace_with_family_external_ids: bool = True,
    include_header: bool = True,
    empty_participant_value: Optional[str] = None,
    connection: Connection = get_project_readonly_connection,
    include_participants_not_in_families: bool = False,
):
    """
    Generate tab-separated Pedigree file for ALL families unless internal_family_ids is specified.

    Allow replacement of internal participant and family IDs with their external counterparts.
    """
    family_layer = FamilyLayer(connection)
    assert connection.project
    pedigree_dicts = await family_layer.get_pedigree(
        project=connection.project,
        family_ids=internal_family_ids,
        replace_with_participant_external_ids=replace_with_participant_external_ids,
        replace_with_family_external_ids=replace_with_family_external_ids,
        empty_participant_value=empty_participant_value,
        include_participants_not_in_families=include_participants_not_in_families,
    )

    if response_type in (ContentType.CSV, ContentType.TSV):
        delim = '\t' if response_type == ContentType.TSV else ','
        output = io.StringIO()
        writer = csv.writer(output, delimiter=delim)
        if include_header:
            writer.writerow(PedRow.row_header())

        keys = [
            'family_id',
            'individual_id',
            'paternal_id',
            'maternal_id',
            'sex',
            'affected',
        ]
        pedigree_rows = [[(row[k] or '') for k in keys] for row in pedigree_dicts]
        writer.writerows(pedigree_rows)

        basefn = f'{connection.project}-{date.today().isoformat()}'
        if internal_family_ids:
            basefn += '-'.join(str(fm) for fm in internal_family_ids)

        extension = 'ped' if response_type == ContentType.TSV else 'csv'
        return StreamingResponse(
            iter(output.getvalue()),
            media_type=f'text/{response_type}',
            headers={'Content-Disposition': f'filename={basefn}.{extension}'},
        )

    return pedigree_dicts
3,973
def load_genesets():
    """
    Action:
        Opens the core_gene_sets file and stores each row in gsa_info under its gene set name.
        Opens the WGCNA modules file; if a row has missing data an error is raised, otherwise each module/gene pair
        is stored with its loading value. Then the rgd vs. GO file is read; if required values are blank an
        exception is raised, otherwise the gene is recorded for the GO term with weight 1, and any GO term not yet
        in gsa_info is added. Then the MSigDB signature vs. gene pairs file is read; if the subcategory is RegNet
        the source is changed from MSigDB to RegNet, the gene is recorded for the signature with weight 1, and any
        signature not yet in gsa_info is added. Gene sets with fewer than 3 or more than 5000 genes are dropped.
        Finally GeneSets objects are updated or created, and GeneSetMember objects are created for their genes.

    Returns:
        none

    Notes:
        Can this be broken up for readability?
    """
    cf = os.path.join(settings.BASE_DIR, config['DEFAULT']['core_gene_sets'])
    logger.info('Loading core gene sets from file %s', cf)

    gsa_info = collections.defaultdict(dict)
    gsa_genes = collections.defaultdict(dict)

    with open(cf) as f:
        reader = csv.DictReader(f, delimiter='\t')
        for row in reader:
            nm = row['name']
            if gsa_info.get(nm, None) is not None:
                logger.fatal('Conflicting names in %s; gene set names must be unique', cf)
                raise RuntimeError()
            gsa_info[nm] = row

    # read module members - overlaps partially with init_modules in Computation class but we need the gene members
    # in the database for drill down of visualizations
    module_file = os.path.join(settings.BASE_DIR, 'data/WGCNA_modules.txt')
    req_attr_m = ['module', 'rat_entrez_gene_id', 'loading']
    with open(module_file) as f:
        reader = csv.DictReader(f, delimiter='\t')
        for row in reader:
            if any(row[i] == '' for i in req_attr_m):
                logger.fatal('File %s contains undefined values for one or more required attributes %s on line %s',
                             module_file, ",".join(req_attr_m), row)
                raise RuntimeError()
            if not row['module'] in gsa_info:
                logger.warning('Module %s is not defined in core_sets; unexpected and skipping', row['module'])
                continue
            gsa_genes[row['module']][int(row['rat_entrez_gene_id'])] = float(row['loading'])

    # read GO vs. gene pairs from flat file
    go_file = os.path.join(settings.BASE_DIR, 'data/rgd_vs_GO_expansion.txt')
    req_attr_go = ['entrez_gene_id', 'GO_id', 'GO_name', 'GO_type']
    with open(go_file) as f:
        reader = csv.DictReader(f, delimiter='\t')
        for row in reader:
            if any(row[i] == '' for i in req_attr_go):
                logger.fatal('File %s contains undefined values for one or more required attributes %s on line %s',
                             go_file, ",".join(req_attr_go), row)
                raise RuntimeError()
            gsa_genes[row['GO_id']][int(row['entrez_gene_id'])] = 1
            if not row['GO_id'] in gsa_info:
                gsa_info[row['GO_id']] = {'name': row['GO_id'], 'desc': row['GO_name'], 'type': row['GO_type'],
                                          'core_set': False, 'source': 'GO'}

    # read MSigDB signature vs. gene pairs from flat file
    msigdb_file = os.path.join(settings.BASE_DIR, 'data/MSigDB_and_TF_annotation.txt')
    req_attr_msigdb = ['sig_name', 'rat_entrez_gene', 'sub_category', 'description']
    with open(msigdb_file) as f:
        reader = csv.DictReader(f, delimiter='\t')
        for row in reader:
            if any(row[i] == '' for i in req_attr_msigdb):
                logger.fatal('File %s contains undefined values for one or more required attributes %s on line %s',
                             msigdb_file, ",".join(req_attr_msigdb), row)
                raise RuntimeError()

            source = 'MSigDB'
            # DAS RegNet networks included in this file - use a separate source for these, not MSigDB
            if row['sub_category'] == 'RegNet':
                source = 'RegNet'

            gsa_genes[row['sig_name']][int(row['rat_entrez_gene'])] = 1
            if not row['sig_name'] in gsa_info:
                gsa_info[row['sig_name']] = {'name': row['sig_name'], 'desc': row['description'],
                                             'type': row['sub_category'], 'core_set': False, 'source': source}

    # eliminate gene sets too small / too large
    sigs_to_drop = list()
    for sig in gsa_info.keys():
        if gsa_info[sig]['core_set']:
            continue  # don't remove a core set ... shouldn't be any anyway that are too small/big

        n_genes = len(list(filter(lambda x: compute.get_gene_obj(x) is not None, gsa_genes[sig])))
        if n_genes < 3 or n_genes > 5000:
            sigs_to_drop.append(sig)
            continue

    logger.debug('Eliminated %s gene sets based on size constraint', len(sigs_to_drop))
    for s in sigs_to_drop:
        gsa_info.pop(s)
        gsa_genes.pop(s)

    updatecount = 0
    createcount = 0
    for sig in gsa_info:

        if sig not in gsa_genes:
            logger.error('No genes defined for signature %s; deleting geneset', sig)
            continue

        row = gsa_info[sig]
        # replace empty values with None - DB expects Null
        for k in row:
            row[k] = None if row[k] == '' else row[k]
            if row[k] == 'TRUE':
                row[k] = True
            if row[k] == 'FALSE':
                row[k] = False

        geneset = GeneSets.objects.filter(name=row['name']).first()
        if geneset:
            for (key, value) in row.items():
                setattr(geneset, key, value)
            geneset.save()
            updatecount += 1
        else:
            geneset = GeneSets.objects.create(**row)
            createcount += 1

        # delete any existing genes for the signature
        geneset.members.clear()

        genes_skipped = 0
        genes_loaded = 0
        for rat_eg in gsa_genes[sig]:
            gene = compute.get_gene_obj(rat_eg)
            # geneobj will be None for genes not loaded in the gene model, warn on total skipped only
            if not gene:
                genes_skipped += 1
                continue
            weight = gsa_genes[sig][rat_eg]
            GeneSetMember.objects.create(geneset=geneset, gene=gene, weight=weight)
            genes_loaded += 1

        try:
            faction_loaded = genes_loaded / (genes_loaded + genes_skipped)
        except:
            logger.error('Attempting division by zero; no genes in sig %s', sig)
            continue

        if genes_loaded == 0:
            logger.error('No genes were added to geneset %s; deleting it', sig)
            geneset.delete()
            continue
        elif faction_loaded < 0.7:
            logger.warning('Fewer than 70 percent of genes in signature %s were in gene model and loaded: %s skipped and %s loaded',
                           sig, genes_skipped, genes_loaded)
        elif genes_skipped > 0:
            logger.debug('Some genes in signature %s are not in the gene model and skipped: %s skipped and %s loaded',
                         sig, genes_skipped, genes_loaded)
        else:
            logger.debug('Number of genes loaded for signature %s: %s', sig, genes_loaded)

    logging.info('Number of core gene sets created: %s, number updated: %s', createcount, updatecount)
3,974
def lang_string_set_to_xml(obj: model.LangStringSet, tag: str) -> etree.Element:
    """
    serialization of objects of class LangStringSet to XML

    :param obj: object of class LangStringSet
    :param tag: tag name of the returned XML element (incl. namespace)
    :return: serialized ElementTree object
    """
    et_lss = _generate_element(name=tag)
    for language in obj:
        et_lss.append(_generate_element(name=NS_AAS + "langString",
                                        text=obj[language],
                                        attributes={"lang": language}))
    return et_lss
3,975
def grasp_from_contacts(contact1, contact2):
    """Helper: if you have two contacts, this returns an AntipodalGrasp"""
    d = vectorops.unit(vectorops.sub(contact2.x, contact1.x))
    grasp = AntipodalGrasp(vectorops.interpolate(contact1.x, contact2.x, 0.5), d)
    grasp.finger_width = vectorops.distance(contact1.x, contact2.x)
    grasp.contact1 = contact1
    grasp.contact2 = contact2
    return grasp
3,976
def choose_move(data: dict) -> str:
    """
    data: Dictionary of all Game Board data as received from the Battlesnake Engine.
    For a full example of 'data', see https://docs.battlesnake.com/references/api/sample-move-request

    return: A String, the single move to make. One of "up", "down", "left" or "right".

    Use the information in 'data' to decide your next move. The 'data' variable can be interacted
    with as a Python Dictionary, and contains all of the information about the Battlesnake board
    for each move of the game.
    """
    my_head = data["you"]["head"]  # A dictionary of x/y coordinates like {"x": 0, "y": 0}
    my_body = data["you"]["body"]  # A list of x/y coordinate dictionaries like [ {"x": 0, "y": 0}, {"x": 1, "y": 0}, {"x": 2, "y": 0} ]

    # TODO: uncomment the lines below so you can see what this data looks like in your output!
    print(f"~~~ Turn: {data['turn']}  Game Mode: {data['game']['ruleset']['name']} ~~~")
    print(f"All board data this turn: {data}")
    print(f"My Battlesnakes head this turn is: {my_head}")
    print(f"My Battlesnakes body this turn is: {my_body}")

    possible_moves = ["up", "down", "left", "right"]

    # Don't allow your Battlesnake to move back in on it's own neck
    possible_moves = avoid_my_neck(my_head, my_body, possible_moves)

    # TODO: Using information from 'data', find the edges of the board and don't let your Battlesnake move beyond them
    board_height = data["board"]["height"]
    board_width = data["board"]["width"]
    if my_head["x"] == 0:
        possible_moves = remove("left", possible_moves)
    if my_head["y"] == 0:
        possible_moves = remove("down", possible_moves)
    if my_head["x"] == (board_width - 1):
        possible_moves = remove("right", possible_moves)
    if my_head["y"] == (board_height - 1):
        possible_moves = remove("up", possible_moves)

    # TODO: Using information from 'data', don't let your Battlesnake pick a move that would hit its own body
    for square in my_body:
        if square["x"] == my_head["x"] and (square["y"] - my_head["y"]) == 1:
            possible_moves = remove("up", possible_moves)
        elif square["x"] == my_head["x"] and (square["y"] - my_head["y"]) == -1:
            possible_moves = remove("down", possible_moves)
        elif (square["x"] - my_head["x"]) == 1 and square["y"] == my_head["y"]:
            possible_moves = remove("right", possible_moves)
        elif (square["x"] - my_head["x"]) == -1 and square["y"] == my_head["y"]:
            possible_moves = remove("left", possible_moves)

    # TODO: Using information from 'data', don't let your Battlesnake pick a move that would collide with another Battlesnake
    opponents = data["board"]["snakes"][1:]
    for opp in opponents:
        if {"x": my_head["x"], "y": (my_head["y"] + 1)} in opp["body"]:
            possible_moves = remove("up", possible_moves)
        if {"x": my_head["x"], "y": (my_head["y"] - 1)} in opp["body"]:
            possible_moves = remove("down", possible_moves)
        if {"x": (my_head["x"] + 1), "y": my_head["y"]} in opp["body"]:
            possible_moves = remove("right", possible_moves)
        if {"x": (my_head["x"] - 1), "y": my_head["y"]} in opp["body"]:
            possible_moves = remove("left", possible_moves)

    # TODO: Using information from 'data', make your Battlesnake move towards a piece of food on the board
    food = data["board"]["food"]
    health = data["you"]["health"]
    length = data["you"]["length"]

    if health <= 20 and length < 11:
        closeFood = closestFood(my_head, food)
        if closeFood[0] == 1 and safe(opponents, closeFood[1]):
            move = directionToMove(my_head, closeFood[1])
            if move in possible_moves:
                return move
        else:
            point = closeFood[1]
            # moves towards the closest piece of food
            if point["x"] > my_head["x"]:
                possible_moves = remove("left", possible_moves)
            if point["x"] < my_head["x"]:
                possible_moves = remove("right", possible_moves)
            if point["y"] > my_head["y"]:
                possible_moves = remove("down", possible_moves)
            if point["y"] < my_head["y"]:
                possible_moves = remove("up", possible_moves)

    # Choose a random direction from the remaining possible_moves to move in, and then return that move
    # TODO: Explore new strategies for picking a move that are better than random

    # makes sure not to collide with itself in the future
    if ({"x": my_head["x"], "y": (my_head["y"] + 2)} in my_body):
        possible_moves = remove("up", possible_moves)
    if ({"x": my_head["x"], "y": (my_head["y"] - 2)} in my_body):
        possible_moves = remove("down", possible_moves)
    if ({"x": (my_head["x"] + 2), "y": my_head["y"]} in my_body):
        possible_moves = remove("right", possible_moves)
    if ({"x": (my_head["x"] - 2), "y": my_head["y"]} in my_body):
        possible_moves = remove("left", possible_moves)

    if len(possible_moves) > 1:
        # checks for head to heads
        if ("up" in possible_moves) and not safe(opponents, {"x": my_head["x"], "y": (my_head["y"] + 1)}):
            possible_moves = remove("up", possible_moves)
        if ("down" in possible_moves) and not safe(opponents, {"x": my_head["x"], "y": (my_head["y"] - 1)}):
            possible_moves = remove("down", possible_moves)
        if ("right" in possible_moves) and not safe(opponents, {"x": (my_head["x"] + 1), "y": my_head["y"]}):
            possible_moves = remove("right", possible_moves)
        if ("left" in possible_moves) and not safe(opponents, {"x": (my_head["x"] - 1), "y": my_head["y"]}):
            possible_moves = remove("left", possible_moves)

    # if len(possible_moves) > 1:
    #     # prevents getting stuck in a corner
    #     awayFromCorners(my_head, possible_moves, board_height, board_width)

    move = random.choice(possible_moves)

    print(f"{data['game']['id']} MOVE {data['turn']}: {move} picked from all valid options in {possible_moves}")

    return move
3,977
def get_fremont_data(filename='Fremont.csv', url=FREMONT_URL, force_download=False):
    """Download and cache the fremont data

    Parameters
    ----------
    filename : string (optional)
        location to save the data
    url : string (optional)
        web location of the data
    force_download : bool (optional)
        if True, force redownload of data

    Returns
    -------
    data : pandas.DataFrame
        The fremont bridge data
    """
    if force_download or not os.path.exists(filename):
        urlretrieve(url, filename)

    # Before:
    # data = pd.read_csv(filename, index_col='Date', parse_dates=True)
    # parse the Date strings and make them the index
    # After: x20 faster
    # look at http://strftime.org/
    data = pd.read_csv(filename, index_col='Date')
    try:
        # data.index = pd.to_datetime(data.index, format='%m/%d/%Y %H:%M:%S %p')
        data.index = pd.to_datetime(data.index, format='%m/%d/%Y %I:%M:%S %p')  # %I for 12-hour clock with AM/PM
    except TypeError:
        # inferring the format automatically is much slower (x10 at least)
        data.index = pd.to_datetime(data.index)
    data.columns = ['East', 'West']
    data['Total'] = data['West'] + data['East']
    return data
3,978
def boolean_dumper(dumper, value):
    """
    Dump booleans as yes or no strings.
    """
    value = u'yes' if value else u'no'
    style = None
    return dumper.represent_scalar(u'tag:yaml.org,2002:bool', value, style=style)
3,979
def preprocess_data_4_catboost(data_df, output_path=None):
    """
    preprocess data for working with gradient boosting techniques, specifically with the catboost library.
    since this is going to use the preprocessing built into the catboost library there are slightly different
    steps to be done
    """

    """
    train_data = Pool(
        data=FeaturesData(
            num_feature_data=np.array([[1, 4, 5, 6],
                                       [4, 5, 6, 7],
                                       [30, 40, 50, 60]],
                                      dtype=np.float32),
            cat_feature_data=np.array([[b"a", b"b"],
                                       [b"a", b"b"],
                                       [b"c", b"d"]],
                                      dtype=object)
        ),
        label=[1, 1, -1]
    )
    """

    new_df_w_labels = data_df.copy()
    for idx, odds_string in data_df.ODDS.iteritems():
        # skip data qual errors and abnormalities
        if not isinstance(odds_string, str):
            continue
        divied_list = _preprocess_odds_string(odds_string)
        for school_or_perc in divied_list:
            if school_or_perc in SCHOOLS_REVERSED.keys():
                school_idx = divied_list.index(school_or_perc)
                # the percent is always the next index after the school
                perc = divied_list[school_idx + 1]
                # print "School: {};Odds: {}".format(school_or_perc,perc)
                # use the standardized name
                standard_school_name = SCHOOLS_REVERSED[school_or_perc]
                # insert the specific name value for the correct row
                new_df_w_labels.at[idx, standard_school_name] = _parse_str_nums(perc)

    new_df_w_labels = _reduce_majors_dimensionality(new_df_w_labels)

    # drop unused columns
    data_after_drop = new_df_w_labels.drop(['ODDS', 'INTERNATIONAL', 'JOBTITLE'], axis=1, inplace=False)

    # change categorical data into numeric
    categorical_cols = ['UNIVERSITY', 'MAJOR', 'GENDER', 'RACE']

    # a dataframe of ONLY the features
    features_only_df = data_after_drop.drop(TARGET_LABELS, axis=1, inplace=False)

    # determine the columns that are features by subtracting from labels
    feature_cols = set(data_after_drop.columns) - set(TARGET_LABELS)

    # a dataframe with ONLY labels
    labels = data_after_drop.drop(feature_cols, axis=1, inplace=False)

    multi_data_set_dict = {}
    for school in labels.columns:
        df_for_school = features_only_df.join(pd.DataFrame({school: labels[school]}))

        # a holder dictionary that contains the features numpy ndarray for features and numpy ndarray for school label
        school_dict = {}

        # drop the NaNs from the dataset in any feature column or label. otherwise model training will fail
        df_for_school.dropna(inplace=True)

        # store the features as a numpy ndarray to be fed directly to model training
        numerical_features_np_array = df_for_school.drop([school] + categorical_cols, axis=1, inplace=False).values
        categorical_features_np_array = df_for_school[categorical_cols].values

        # store the labels for a particular school as a numpy ndarray to be fed directly to model training
        labels_as_list = df_for_school.drop(feature_cols, axis=1, inplace=False)[school].tolist()

        datasetpool = Pool(
            data=FeaturesData(
                num_feature_data=np.array(numerical_features_np_array, dtype=np.float32),
                cat_feature_data=np.array(categorical_features_np_array, dtype=object)
            ),
            label=labels_as_list
        )

        multi_data_set_dict[school] = datasetpool

    return multi_data_set_dict
3,980
def conv_current_to_electrons_second(current):
    """
    Convert a current in Amps to a number of electrons per second.
    """
    return int(current / const.electron_charge)
3,981
def multigraph(der, prime):
    """
    Graphs the analytical and discrete derivatives of a function.
    """
    fig = plt.figure(1)
    x = np.linspace(1/1000, 1, 101)
    y1 = der
    y2 = prime(x)
    plt.plot(x, y1, 'b-')
    plt.plot(x, y2, 'r-')
    plt.xlabel('x')
    plt.ylabel('y')
    plt.title('Analytical vs Discrete derivatives')
    plt.show()
3,982
def get_users():
    """
    Use urllib3 to make a REST call to get list of Okta Users for a given Okta Application
    """
    request_url = f"{OKTA_URL}/apps/{OKTA_APP_ID}/users"
    okta_users_request = HTTP.request(
        'GET',
        request_url,
        headers={'Content-Type': 'application/json', 'Authorization': OKTA_AUTH},
        retries=False,
    )
    LOGGER.info(f"Retrieved Okta Users Information from {request_url}")
    users = json.loads(okta_users_request.data.decode('utf-8'))
    return users
3,983
def n_sample_per_class_train_set(df, n_samples=3, class_column="category"):
    """
    returns a subset of the provided df that contains n_samples instances of each class

    :param df: pandas dataframe that contains hidden_reps with class labels
    :param n_samples: number of samples per class
    :param class_column: column with class labels in the df
    :return: subset of the original df that contains maximum n_samples instances of each class
    """
    assert class_column in df.columns
    classes = list(set(df[class_column]))
    class_count_dict = dict([(c, 0) for c in classes])
    selection_array = []
    for i, c in zip(df.index, df[class_column]):
        if class_count_dict[c] >= n_samples:
            selection_array.append(False)
            continue
        else:
            selection_array.append(True)
            class_count_dict[c] += 1

    print(len(class_count_dict), len(selection_array))
    assert len(selection_array) == len(df.index)
    return df.copy()[selection_array]
3,984
def clip_count(cand_d, ref_ds):
    """Count the clip count for each ngram considering all references."""
    count = 0
    for m in cand_d.keys():
        m_w = cand_d[m]
        m_max = 0
        for ref in ref_ds:
            if m in ref:
                m_max = max(m_max, ref[m])
        m_w = min(m_w, m_max)
        count += m_w
    return count
3,985
def container_wrapper(directive, literal_node, caption, classes):
    """adapted from
    https://github.com/sphinx-doc/sphinx/blob/master/sphinx/directives/code.py
    """
    container_node = docutils.nodes.container(
        '', literal_block=True, classes=classes)  # ['literal-block-wrapper']
    parsed = docutils.nodes.Element()
    directive.state.nested_parse(StringList([caption], source=''),
                                 directive.content_offset, parsed)
    if isinstance(parsed[0], docutils.nodes.system_message):
        msg = 'Invalid caption: %s' % parsed[0].astext()
        raise ValueError(msg)
    elif isinstance(parsed[0], docutils.nodes.Element):
        caption_node = docutils.nodes.caption(parsed[0].rawsource, '',
                                              *parsed[0].children)
        caption_node.source = literal_node.source
        caption_node.line = literal_node.line
        container_node += caption_node
        container_node += literal_node
        return container_node
    else:
        raise RuntimeError
3,986
def list_policies(Filter=None, NextToken=None, MaxResults=None):
    """
    Retrieves the list of all policies in an organization of a specified type.
    This operation can be called only from the organization's master account.
    See also: AWS API Documentation

    :example: response = client.list_policies(
        Filter='SERVICE_CONTROL_POLICY',
        NextToken='string',
        MaxResults=123
    )

    :type Filter: string
    :param Filter: [REQUIRED]
        Specifies the type of policy that you want to include in the response.

    :type NextToken: string
    :param NextToken: Use this parameter if you receive a NextToken response in a previous request that
        indicates that there is more output available. Set it to the value of the previous call's NextToken
        response to indicate where the output should continue from.

    :type MaxResults: integer
    :param MaxResults: (Optional) Use this to limit the number of results you want included in the response.
        If you do not include this parameter, it defaults to a value that is specific to the operation. If
        additional items exist beyond the maximum you specify, the NextToken response element is present and
        has a value (is not null). Include that value as the NextToken request parameter in the next call to
        the operation to get the next part of the results. Note that Organizations might return fewer results
        than the maximum even when there are more results available. You should check NextToken after every
        operation to ensure that you receive all of the results.

    :rtype: dict
    :return: {
        'Policies': [
            {
                'Id': 'string',
                'Arn': 'string',
                'Name': 'string',
                'Description': 'string',
                'Type': 'SERVICE_CONTROL_POLICY',
                'AwsManaged': True|False
            },
        ],
        'NextToken': 'string'
    }
    """
    pass
3,987
def tune_train(config):
    """Train the model with a hyper-parameter tuner (ray).

    Args:
        config (dict): All the parameters for the model.
    """
    data = config["data"]
    train_engine = NeuCF(munchify(config))
    result = train_engine.train(data)
    while train_engine.eval_engine.n_worker > 0:
        time.sleep(20)
    tune.report(
        valid_metric=result["valid_metric"],
        model_save_dir=result["model_save_dir"],
    )
3,988
def test_after_canonical_ife_is_finalized_inputs_are_not_exited_when_ife_is_restarted_and_non_canonical(testlang, plasma_framework, token):
    """
    1. Alice and Bob send a canonical transaction transferring their funds to Alice.
    2. Alice starts an in-flight exit and exits her output.
    3. Bob creates a competing transaction spending his input.
    4. In-flight exit is restarted and is non-canonical as Bob produces a competing transaction.
    5. Alice and Bob piggyback on their inputs and exit is processed.
    6. Funds from inputs are not withdrawn as they were marked as finalized when Alice exited her output in step 2.
    """
    alice, bob, deposit_amount = testlang.accounts[0], testlang.accounts[1], 100
    alice_deposit_id = testlang.deposit(alice, deposit_amount)
    bob_deposit_id = testlang.deposit(bob, deposit_amount)

    alice_output_amount = 2 * deposit_amount
    spend_id = testlang.spend_utxo([alice_deposit_id, bob_deposit_id], [alice, bob],
                                   [(alice.address, NULL_ADDRESS, alice_output_amount)])

    alice_eth_balance_before = testlang.get_balance(alice)

    # transaction is not included in child chain and canonical in-flight exit is started and processed
    testlang.start_in_flight_exit(spend_id)
    testlang.piggyback_in_flight_exit_output(spend_id, 0, alice)
    testlang.forward_timestamp(2 * MIN_EXIT_PERIOD + 1)
    testlang.process_exits(NULL_ADDRESS, 0, 1)

    # alice exits with her Eth output
    alice_eth_balance = testlang.get_balance(alice)
    assert alice_eth_balance == alice_eth_balance_before + alice_output_amount

    # bob spends his inputs in a competing transaction
    competing_spend_id = testlang.spend_utxo(
        [bob_deposit_id], [bob], [(bob.address, NULL_ADDRESS, deposit_amount)], force_invalid=True
    )

    # in-flight exit is restarted
    testlang.start_in_flight_exit(spend_id)

    # its canonicity is challenged with the competing transaction
    testlang.challenge_in_flight_exit_not_canonical(spend_id, competing_spend_id, account=bob)

    # in-flight exit is not canonical
    in_flight_exit = testlang.get_in_flight_exit(spend_id)
    assert not in_flight_exit.is_canonical

    # owners piggyback on inputs
    testlang.piggyback_in_flight_exit_input(spend_id, 0, alice)
    testlang.piggyback_in_flight_exit_input(spend_id, 1, bob)

    eth_balance_before_processing_exits = testlang.get_balance(plasma_framework.eth_vault)

    # exit is processed
    testlang.forward_timestamp(2 * MIN_EXIT_PERIOD + 1)
    testlang.process_exits(NULL_ADDRESS, 0, 2)

    # users didn't exit their inputs - no funds were withdrawn from the eth vault
    # because in-flight transaction output is already exited and the inputs were flagged as finalized
    eth_balance = testlang.get_balance(plasma_framework.eth_vault)
    assert eth_balance_before_processing_exits == eth_balance
3,989
def get_current_and_next_quarters(request, num):
    """
    Returns the current and next num uw_sws.models.Term objects in a list
    for the current quarter referred to in the user session. Returns the
    next num - 1 quarters along with the current one.
    """
    term = get_current_quarter(request)
    quarters = [term]

    for x in range(1, num):
        term = get_term_after(term)
        quarters.append(term)

    return quarters
3,990
def _create_metadata_from_dat_df(
    csv_df: pd.DataFrame,
) -> Tuple[Dict[int, tuple], Pitch]:
    """Creates meta information from the CSV file as parsed by pd.read_csv().

    Parameters
    ----------
    csv_df: DataFrame
        Containing all data from the positions CSV file as DataFrame.

    Returns
    -------
    periods: Dict[int, int]
        Dictionary with start and endframes:
        ``periods[segment] = (startframe, endframe)``.
    pitch: Pitch
        Playing Pitch object.
    """
    # create pitch
    pi_len = csv_df["pitch_dimension_long_side"].values[0]
    pi_wid = csv_df["pitch_dimension_short_side"].values[0]
    pitch = Pitch.from_template(
        "statsperform",
        length=pi_len,
        width=pi_wid,
        sport="football",
    )

    # create periods for segments, coded as jumps in the frame sequence
    periods = {}
    frame_values = csv_df["frame_count"].unique()

    seg_idx = np.where(np.diff(frame_values, prepend=frame_values[0]) > 1)
    seg_idx = np.insert(seg_idx, 0, 0)
    seg_idx = np.append(seg_idx, len(frame_values))
    for segment in range(len(seg_idx) - 1):
        start = int(frame_values[seg_idx[segment]])
        end = int(frame_values[seg_idx[segment + 1] - 1])
        periods[segment] = (start, end)

    return periods, pitch
3,991
def _execute(workbench: thonny.workbench.Workbench, command: _Command) -> None:
    """
    Execute the CrossHair command.

    Depending on the ``command``, different checks are performed
    (*e.g.*, whole file, single function, watch & check *etc.*).
    """
    editor = workbench.get_editor_notebook().get_current_editor()
    if editor is None:
        tkinter.messagebox.showerror(
            title="No active editor",
            message="No file is currently edited. "
            "Hence CrossHair check can not be performed.",
        )
        return

    filename = editor.get_filename()
    if filename is None:
        tkinter.messagebox.showerror(
            title="No file name",
            message="The current file has not been saved and does not have a name so "
            "it can not be checked with CrossHair. "
            "Please save the file first.",
        )
        return

    editor.save_file()

    if command == _Command.CHECK:
        cmd = ["!", "crosshair", "check", filename]
    elif command == _Command.CHECK_AT:
        selection = editor.get_code_view().get_selected_range()

        # fmt: off
        cmd = [
            "!", "crosshair", "check",
            f"{filename}:{selection.lineno}",
        ]
        # fmt: on
    elif command == _Command.WATCH:
        cmd = ["!", "crosshair", "watch", filename]
    else:
        raise NotImplementedError(f"Unhandled command: {command}")

    cmd_line = subprocess.list2cmdline(cmd)
    thonny.get_shell().text.submit_command(cmd_line=cmd_line + "\n", tags=("magic",))
    return
3,992
def setup_run_args(parser):
    """ Add common arguments for all run scripts.
    """
    parser.add_argument('--pennant-input', dest='pennant_input',
                        action='store', type=str,
                        default="PENNANT/test/leblancx4/leblancx4.pnt",
                        help='Path to the input file (see .pnt files in test' +
                             'directory of the PENNANT source tarball). ' +
                             'Absolute path or relative to the app ' +
                             'directory. Default is leblancx4.pnt')
    parser.add_argument('--pennant-cores-per-node', dest='pennant_cores_per_node',
                        action='store', type=int,
                        help='Number of physical cores to reserve for the app. ' +
                             'If not defined, highest even number of cores less than ' +
                             'the total number in the node will be reserved (leaving at ' +
                             'least one node for GEOPM).')
    parser.add_argument('--pennant-cores-per-rank', dest='pennant_cores_per_rank',
                        action='store', type=int, default=1,
                        help='Number of physical cores to use per rank for OMP parallelism.' +
                             'Default is 1.')
    parser.add_argument('--epoch-to-outerloop', dest='epoch_to_outerloop',
                        action='store', type=int,
                        help='Chooses an appropriate pennant build with a specific GEOPM ' +
                             'epoch marker per outer loop count. If not specified, built-in '
                             'value for the problem size will be used.')
    parser.add_argument('--num-epochs', dest='num_epochs',
                        action='store', type=int,
                        help='Stop Pennant execution at the desired number of epochs (cycles in ' +
                             'Pennant terms). Has no effect if it is less than 1 or greater than the '
                             'number of cycles for the dataset.')
3,993
def main():
    """
    Main function used in script, primarily used as a handle
    to get the output into stdout.
    """
    # There are no args, but parse them just so help works
    print(process_files_json(), end="")
    return None
3,994
def train_generator(train_loader, test_loader, num_epoch=500,
                    lr=0.0001, beta1=0.9, beta2=0.999):
    """Train a generator on its own.

    Args:
        train_loader: (DataLoader) a DataLoader wrapping the training dataset
        test_loader: (DataLoader) a DataLoader wrapping the test dataset
        num_epoch: (int) number of epochs performed during training
        lr: (float) learning rate of the discriminator and generator Adam optimizers
        beta1: (float) beta1 coefficient of the discriminator and generator Adam optimizers
        beta2: (float) beta1 coefficient of the discriminator and generator Adam optimizers

    Returns:
        generator: (nn.Module) the trained generator
    """

    cuda = True if torch.cuda.is_available() else False
    print(f"Using cuda device: {cuda}")  # check if GPU is used

    # Tensor type (put everything on GPU if possible)
    Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor

    # Output folder
    if not os.path.exists("./images/generator"):
        os.makedirs("./images/generator")

    # Loss function
    criterion = torch.nn.L1Loss()  # A loss for a voxel-wise comparison of images like torch.nn.L1Loss

    # Initialize the generator
    generator = GeneratorUNet()

    if cuda:
        generator = generator.cuda()
        criterion.cuda()

    # Optimizer
    optimizer = torch.optim.Adam(generator.parameters(), lr=lr, betas=(beta1, beta2))

    def sample_images(epoch):
        """Saves a generated sample from the validation set"""
        imgs = next(iter(test_loader))
        real_A = imgs["T1"].type(Tensor)
        real_B = imgs["T2"].type(Tensor)
        fake_B = generator(real_A)
        img_sample = torch.cat((real_A.data, fake_B.data, real_B.data), -2)
        save_image(img_sample, f"./images/generator/epoch-{epoch}.png", nrow=5, normalize=True)

    # ----------
    #  Training
    # ----------

    prev_time = time.time()

    for epoch in range(num_epoch):
        for i, batch in enumerate(train_loader):

            # Inputs T1-w and T2-w
            real_t1 = batch["T1"].type(Tensor)
            real_t2 = batch["T2"].type(Tensor)

            # Remove stored gradients
            optimizer.zero_grad()

            # Generate fake T2 images from the true T1 images
            fake_t2 = generator(real_t1)

            # Compute the corresponding loss
            loss = criterion(fake_t2, real_t2)

            # Compute the gradient and perform one optimization step
            loss.backward()
            optimizer.step()

            # --------------
            #  Log Progress
            # --------------

            # Determine approximate time left
            batches_done = epoch * len(train_loader) + i
            batches_left = num_epoch * len(train_loader) - batches_done
            time_left = datetime.timedelta(
                seconds=batches_left * (time.time() - prev_time))
            prev_time = time.time()

            # Print log
            sys.stdout.write(
                "\r[Epoch %d/%d] [Batch %d/%d] [Loss: %f] ETA: %s"
                % (
                    epoch + 1,
                    num_epoch,
                    i,
                    len(train_loader),
                    loss.item(),
                    time_left,
                )
            )

        # Save images at the end of each epoch
        sample_images(epoch)

    return generator
3,995
def backup_file(filename, backup_dir=None):
    """Backup the file, if it exists. Backups versioning is supported."""
    if isinstance(backup_dir, str):
        newname = os.path.join(backup_dir, os.path.basename(filename))
    else:
        newname = filename
    version = 1
    while os.access(newname, os.F_OK):
        newname = '{}.~{:02d}~'.format(filename, version)
        version += 1
    if newname != filename:
        # module_logger.debug("Backing up file: {} --> {}".format(filename, newname))
        try:
            os.rename(filename, newname)
        except Exception as err:
            module_logger.warning('Cannot create backup copy of {}: {}'.format(filename, err), exc_info=True)
3,996
def configure_logger(verbose: bool):
    """Configure logging

    :param verbose: display debug and info messages
    :return: nothing
    """
    logger = logging.getLogger()
    logger.handlers = []
    stdout = logging.StreamHandler(sys.stdout)
    stdout.setLevel(level=logging.WARNING)
    stdout.setFormatter(logging.Formatter(LOG_FORMAT))
    logger.addHandler(stdout)
    if verbose:
        stdout.setLevel(level=logging.DEBUG)
        logger.setLevel(level=logging.DEBUG)
3,997
def expand(doc, doc_url="param://", params=None):
    """
    ASSUMING YOU ALREADY PULLED THE doc FROM doc_url, YOU CAN STILL USE THE
    EXPANDING FEATURE

    USE mo_json_config.expand({}) TO ASSUME CURRENT WORKING DIRECTORY

    :param doc: THE DATA STRUCTURE FROM JSON SOURCE
    :param doc_url: THE URL THIS doc CAME FROM (DEFAULT USES params AS A DOCUMENT SOURCE)
    :param params: EXTRA PARAMETERS NOT FOUND IN THE doc_url PARAMETERS (WILL SUPERSEDE PARAMETERS FROM doc_url)
    :return: EXPANDED JSON-SERIALIZABLE STRUCTURE
    """
    if doc_url.find("://") == -1:
        Log.error("{{url}} must have a protocol (eg http://) declared", url=doc_url)

    url = URL(doc_url)
    url.query = set_default(url.query, params)
    phase1 = _replace_ref(doc, url)  # BLANK URL ONLY WORKS IF url IS ABSOLUTE
    phase2 = _replace_locals(phase1, [phase1])
    return wrap(phase2)
3,998
def get_model_creator(hparams):
    """Get the right model class depending on configuration."""
    if hparams.architecture == 'peng':
        model_creator = model.Model
        """vanilla lstm, seq2seq"""
    return model_creator
3,999