content: string, lengths 22 to 815k
id: int64, 0 to 4.91M
def _live_tensors(f, attr_name="inputs"):
  """Returns the indices of the used inputs.

  Note: This currently only handles direct index accesses e.g. op.inputs[1].
  If the function has slicing or list comprehension on attr_name then returns
  _ALL. This ensures that this is correct even if inefficient.

  Args:
    f: A grad function, taking the op as first argument.
    attr_name: op attr to track. "inputs" or "outputs".

  Returns:
    Either one of:
      * set of integers representing individual indices of inputs used
      * the value _ALL, if indices are used but cannot be determined which
      * empty set, if no inputs are used
  """
  node, _ = parser.parse_entity(f, ())
  entity_info = transformer.EntityInfo(
      name=f.__name__,
      source_code=None,
      source_file=None,
      future_features=(),
      namespace=sys.modules[f.__module__].__dict__)
  ctx = transformer.Context(entity_info, None, None)

  graphs = cfg.build(node)
  node = qual_names.resolve(node)
  node = activity.resolve(node, ctx, None)
  node = reaching_fndefs.resolve(node, ctx, graphs)
  node = liveness.resolve(node, ctx, graphs)

  op_arg_name = anno.getanno(node.args.args[0], anno.Basic.QN)
  op_inputs_outputs_name = qual_names.QN(op_arg_name, attr=attr_name)

  special_tracker = _SubscriptUseTracker(ctx, (op_inputs_outputs_name,))
  node = special_tracker.visit(node)

  live_vars_in = anno.getanno(node.body[0], anno.Static.LIVE_VARS_IN)
  inputs_outputs_used_qns = set()
  for v in special_tracker.complex_reads:
    # Complicated patterns like op.inputs[:3]. Could be smarter about them
    # if they matter much.
    if v == op_inputs_outputs_name:
      return _ALL
  for v in live_vars_in:
    if v in special_tracker.reads:
      if (v.has_subscript()
          and v.parent == op_inputs_outputs_name):
        inputs_outputs_used_qns.add(v)
      elif v == op_inputs_outputs_name:
        # When op.{attr_name} is used directly, assume all tensors are
        # used for now. In that case, no point digging further.
        # TODO(mdan): We can descend into tuple expansions.
        return _ALL

  function_calls_tracker = _FunctionCallsTracker(ctx, op_arg_name)
  node = function_calls_tracker.visit(node)

  input_output_indices = set()

  for called_f in function_calls_tracker.calls:
    child_indices = _live_tensors(called_f, attr_name=attr_name)
    if child_indices is _ALL:
      return _ALL
    input_output_indices |= child_indices

  for v in inputs_outputs_used_qns:
    assert v.has_subscript()
    _, subscript = v.qn
    if not subscript.is_simple():
      # Not a number, assuming it can be anything.
      return _ALL
    subscript_val, = subscript.qn
    if (not isinstance(subscript_val, qual_names.Literal) and
        not isinstance(subscript_val.value, int)):
      # Not a number, assuming it can be anything.
      return _ALL
    input_output_indices.add(subscript_val.value)
  return input_output_indices
3,000
def update_site_config(site_name, parameters):
    """Update the site config to establish the database settings"""
    site_directory = os.path.join('web', 'sites', site_name)
    if not os.path.isdir(site_directory):
        print('site directory {} missing'.format(site_directory))
        sys.exit(-1)

    config_filename = os.path.join(site_directory, 'site.ini')
    if os.path.exists(config_filename):
        existing_config = configparser.ConfigParser()
        existing_config.read(config_filename)
        if existing_config.has_section('database'):
            print('database settings already exist in {}'.format(
                config_filename
            ))
            print(existing_config.options('database'))
            sys.exit(-1)

    new_config = configparser.RawConfigParser()
    new_config.add_section('database')
    for key, value in parameters.items():
        if key == 'database':
            key = 'name'
        new_config.set('database', key, value)

    with open(config_filename, 'a') as configfile:
        new_config.write(configfile)

    return new_config
3,001
def plot_with_front(gen, front, title, fname):
    """
    plot with front: Plot the generation gen and front, highlighting front as
    the pareto front on the graph.

    Parameters:
        gen: The generation to plot.
        front: The pareto front extracted from generation gen
        title: Plot Title
        fname: path to output file for plot image.
    """
    fig, ax = subplots()
    plot_inds(ax, gen, 'Non-Dominant')
    plot_inds(ax, front, 'Dominant')
    ax.set_title(title)
    ax.legend()
    fig.savefig(fname)
    return [fig, ax]
3,002
def find_closest_positive_divisor(a, b):
    """Return non-trivial integer divisor (bh) of (a) closest to (b) in
    abs(b - bh) such that a % bh == 0."""
    assert a > 0 and b > 0
    if a <= b:
        return a
    for k in range(0, a - b + 1):
        bh = b + k
        if bh > 1 and a % bh == 0:
            return bh
        bh = b - k
        if bh > 1 and a % bh == 0:
            return bh
    return a
3,003
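A quick usage sketch for find_closest_positive_divisor above (pure Python, no extra dependencies); the numbers are arbitrary examples:

print(find_closest_positive_divisor(100, 7))   # 5: closest non-trivial divisor of 100 to 7
print(find_closest_positive_divisor(12, 5))    # 6
print(find_closest_positive_divisor(7, 10))    # 7: a <= b, so a itself is returned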
def simplify_stl_names(decl):
    """Take common STL/Standard Library names and simplify them to help make
    the stack trace look more readable and less like the graphics in the
    matrix.
    """
    p = simplify_template_call(decl)
    if p == []:
        return decl
    return p[0] + '<' + ', '.join(p[1:-1]) + '>::' + p[-1]
3,004
def check_exising_flags(ds_ind, stokes='I', client=None):
    """Check the existing flags in the input Measurement Set."""
    # Determine which polarization state to grid and flag
    if stokes == 'I':
        flags = ds_ind.FLAG.data[:, 0] | ds_ind.FLAG.data[:, -1]
    elif stokes == 'Q':
        flags = ds_ind.FLAG.data[:, 0] | ds_ind.FLAG.data[:, -1]
    elif stokes == 'U':
        flags = ds_ind.FLAG.data[:, 1] | ds_ind.FLAG.data[:, 2]
    elif stokes == 'V':
        flags = ds_ind.FLAG.data[:, 1] | ds_ind.FLAG.data[:, 2]
    elif stokes == 'A':
        flags = da.sum(ds_ind.FLAG.data, axis=1, dtype=bool)
    else:
        raise ValueError(f"check_existing_flags: the stokes argument, "
                         f"'{stokes}', is not currently implemented, "
                         f"please select another value.")

    flag_loc = da.where(flags == True)
    if client is not None:
        flag_loc = client.compute(flag_loc)
    else:
        flag_loc = flag_loc[0].compute()

    nflags = len(flag_loc)
    nrows = len(flags)

    print(f"Rows already flagged: {(100 * nflags / nrows):.1f}% "
          f"({nflags}/{nrows}), in file "
          f"\"{ds_ind.attrs['Measurement Set']}\".")
3,005
def sample_switching_models( models: Sequence, usage_seq: Sequence, X: Union[None, Sequence, Callable] = None, initial_conditions: Optional[Tuple[Sequence, Sequence]] = None, return_input: bool = False, ) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]: """ Sample from a non-stationary stochastic processes that switches between different ARMA models at given times. This functions sets the models' `history_` attribute appropriately to ensure consistency across time. Parameters ---------- models Sequence of models to use. usage_seq Sequence identifying the model to use at each time steps. Models are labeled from `0` to `len(models) - 1`. X If given, this overrides the input source for the models. If it is a sequence, it should be at least as long as `len(usage_seq)`. initial_conditions A tuple, `(initial_y, initial_x)`, of recent samples of the output and input sequences used to seed the simulation. If these are not provided, they are assumed equal to zero. return_input If true, returns both output and input. If false (the default), returns only the output. Returns a sequence `Y` of generated samples. If `return_input` is true, returns a tuple `(Y, X)` of generated output samples and input samples. If the `U` parameter was used and was a sequence, the output `X` simply mirrors the input. """ # check the inputs if len(models) == 0: raise ValueError("No models given.") if np.min(usage_seq) < 0 or np.max(usage_seq) >= len(models): raise ValueError("Invalid entry in usage_seq vector.") # handle vector X if X is not None and not callable(X): if len(X) < len(usage_seq): raise ValueError("Not enough input values in X.") X_ret = X X = sources.Stream(X) have_X_ret = True else: X_ret = np.zeros(len(usage_seq)) have_X_ret = False # handle default initial conditions if initial_conditions is None: initial_conditions = ([], []) # generate the samples Y_ret = np.zeros(len(usage_seq)) usage_rle = rle_encode(usage_seq) ptr = 0 for model_id, n_samples in usage_rle: model = models[model_id] # ensure proper history if ptr >= model.p: history_y = np.copy(Y_ret[ptr - model.p : ptr]) else: n_left = model.p - ptr if len(initial_conditions[0]) >= n_left: history_y = np.hstack((initial_conditions[0][-n_left:], Y_ret[:ptr])) else: history_y = np.hstack( ( np.zeros(n_left - len(initial_conditions[0])), initial_conditions[0], Y_ret[:ptr], ) ) if ptr >= model.q: history_x = np.copy(X_ret[ptr - model.q : ptr]) else: n_left = model.q - ptr if len(initial_conditions[1]) >= n_left: history_x = np.hstack((initial_conditions[1][-n_left:], X_ret[:ptr])) else: history_x = np.hstack( ( np.zeros(n_left - len(initial_conditions[1])), initial_conditions[1], X_ret[:ptr], ) ) model.history_ = (history_y, history_x) # generate and store the samples from this model crt_y, crt_x = model.transform(n_samples, X=X, return_input=True) Y_ret[ptr : ptr + n_samples] = crt_y if not have_X_ret: X_ret[ptr : ptr + n_samples] = crt_x ptr += n_samples if return_input: return Y_ret, X_ret else: return Y_ret
3,006
def test_remove(tree_fixture):
    """Test removal of nodes.
    """
    tree = tree_fixture[0]
    n = tree_fixture[1]
    tree.add(n[1])
    tree.add(n[2])
    tree.add(n[3], parent=n[1])
    tree.add(n[4], parent=n[3])
    tree.add(n[5], parent=n[4])
    assert tree._nodes == [n[1], n[3], n[4], n[5], n[2]]
    all_ch = list(tree.get_all_children(n[1]))
    assert all_ch == [n[3], n[4], n[5]], all_ch
    tree.remove(n[4])
    assert tree._nodes == [n[1], n[3], n[2]]
    tree.remove(n[1])
    assert len(tree._children) == 2
    assert tree._children[None] == [n[2]]
    assert tree._children[n[2]] == []
    assert tree._nodes == [n[2]]
3,007
def eval_per_class(c_dets, c_truths, overlap_thresh=0.5, eval_phrase=False): """ Evaluation for each class. Args: c_dets: A dictionary of all detection results. c_truths: A dictionary of all ground-truth annotations. overlap_thresh: A float of the threshold used in IoU matching. Returns: scores_all: A list of numpy float array collecting the confidence scores of both truth positives and false positives in each image. tp_fp_labels_all: A list of numpy float array collecting the true positives (=1) and false positives (=0) labels in each image. num_gt_all: An integer of the total number of valid ground-truth boxes. """ num_gt_all = sum([len(c_truths[l]) for l in c_truths]) scores_all = [] tp_fp_labels_all = [] img_keys = [] for key in c_dets: img_keys.append(key) img_det = c_dets[key] num_det = len(img_det) scores = np.array([det['score'] for det in img_det]) tp_fp_labels = np.zeros(num_det, dtype=bool) if key not in c_truths or all(scores<0): # detections not in ground truth or detections have negative image level label, classified as false positives scores_all.append(scores) tp_fp_labels_all.append(tp_fp_labels) continue img_gt = c_truths[key] if eval_phrase: ious = np.array([[IoU(d['rect'], g['rect']) for g in img_gt] for d in img_det]) else: ious = np.array([[min(IoU(d['subject_rect'], g['subject_rect']), IoU(d['object_rect'], g['object_rect'])) for g in img_gt] for d in img_det]) if ious.shape[1] > 0: max_overlap_gt_ids = np.argmax(ious, axis=1) is_gt_box_detected = np.zeros(ious.shape[1], dtype=bool) for i in range(num_det): gt_id = max_overlap_gt_ids[i] if ious[i, gt_id] >= overlap_thresh: if not is_gt_box_detected[gt_id]: tp_fp_labels[i] = True is_gt_box_detected[gt_id] = True # if ious.shape[1] > 0: # max_overlap_gt_ids = np.argsort(-1*ious, axis=1) # is_gt_box_detected = np.zeros(ious.shape[1], dtype=bool) # for i in range(num_det): # for gt_id in max_overlap_gt_ids[i, :]: # if ious[i, gt_id] >= overlap_thresh: # if not is_gt_box_detected[gt_id]: # tp_fp_labels[i] = True # is_gt_box_detected[gt_id] = True # break # else: # break # num_gt = len(img_gt) # if ious.shape[1] > 0: # max_overlap_det_ids = np.argsort(-1*ious, axis=0) # is_det_box_used = np.zeros(ious.shape[0], dtype=bool) # for i in range(num_gt): # for det_id in max_overlap_det_ids[:, i]: # if ious[det_id, i] >= overlap_thresh: # if not is_det_box_used[det_id]: # tp_fp_labels[det_id] = True # is_det_box_used[det_id] = True # break # else: # break scores_all.append(scores) tp_fp_labels_all.append(tp_fp_labels) return scores_all, tp_fp_labels_all, num_gt_all, img_keys
3,008
def define_components(mod): """ Adds components to a Pyomo abstract model object to describe unit commitment for projects. Unless otherwise stated, all power capacity is specified in units of MW and all sets and parameters are mandatory. -- Commit decision, limits, and headroom -- CommitProject[(proj, t) in PROJ_DISPATCH_POINTS] is a decision variable of how much capacity (MW) from each project to commit in each timepoint. By default, this operates in continuous mode. Include the project.unitcommit.discrete module to force this to operate with discrete unit commitment. proj_max_commit_fraction[(proj, t) in PROJ_DISPATCH_POINTS] describes the maximum commit level as a fraction of available capacity (capacity that is built and expected to be available for commitment; derated by annual expected outage rate). This has limited use cases, but could be used to simulate outages (scheduled or non-scheduled) in a production-cost simulation. This optional parameter has a default value of 1.0, indicating that all available capacity can be commited. If you wish to have discrete unit commitment, I advise overriding the default behavior and specifying a more discrete treatment of outages. proj_min_commit_fraction[(proj, t) in PROJ_DISPATCH_POINTS] describes the minimum commit level as a fraction of available capacity. This is useful for describing must-run plants that ensure reliable grid operations, and for forcing hydro plants operate at some minimal level to maintain streamflow. This can also be used to specify baseload plants that must be run year-round. This optional parameter will default to proj_max_commit_fraction for generation technologies marked baseload and 0 for all other generators. CommitLowerLimit[(proj, t) in PROJ_DISPATCH_POINTS] is an expression that describes the minimum capacity that must be committed. This is derived from installed capacity and proj_min_commit_fraction. CommitUpperLimit[(proj, t) in PROJ_DISPATCH_POINTS] is an expression that describes the maximum capacity available for commitment. This is derived from installed capacity and proj_max_commit_fraction. Enforce_Commit_Lower_Limit[(proj, t) in PROJ_DISPATCH_POINTS] and Enforce_Commit_Upper_Limit[(proj, t) in PROJ_DISPATCH_POINTS] are constraints that limit CommitProject to the upper and lower bounds defined above. CommitLowerLimit <= CommitProject <= CommitUpperLimit CommitSlackUp[(proj, t) in PROJ_DISPATCH_POINTS] is an expression that describes the amount of additional capacity available for commitment: CommitUpperLimit - CommitProject CommitSlackDown[(proj, t) in PROJ_DISPATCH_POINTS] is an expression that describes the amount of committed capacity that could be taken offline: CommitProject - CommitLowerLimit -- Startup and Shutdown -- The capacity started up or shutdown is completely determined by the change in CommitProject from one hour to the next, but we can't calculate these directly directly within the linear program because linear programs don't have if statements. Instead, we'll define extra decision variables that are tightly constrained. Since startup incurs costs and shutdown does not, the linear program will not simultaneously set both of these to non-zero values. Startup[(proj, t) in PROJ_DISPATCH_POINTS] is a decision variable describing how much additional capacity was brought online in a given timepoint. Committing additional capacity incurs startup costs for fossil plants from fuel requirements as well as additional O&M costs. 
Shutdown[(proj, t) in PROJ_DISPATCH_POINTS] is a decision variable describing how much committed capacity to take offline in a given timepoint. Commit_Startup_Shutdown_Consistency[(proj, t) in PROJ_DISPATCH_POINTS] is a constraint that forces consistency between commitment decision from one hour to the next with startup and shutdown. g_startup_fuel[g in FUEL_BASED_GEN] describes fuel requirements of starting up additional generation capacity expressed in units of MMBTU / MW. This optional parameter has a default value of 0. proj_startup_fuel[proj in FUEL_BASED_PROJECTS] is the same as g_startup_fuel except on a project basis. This optional parameter defaults to g_startup_fuel. g_startup_om[g in GENERATION_TECHNOLOGIES] describes operations and maintenance costs incured from starting up additional generation capacity expressed in units of $base_year / MW. This could represent direct maintenance requirements or some overall depreciation rate from accelerated wear and tear. This optional parameter has a default value of 0. proj_startup_om[proj in PROJECTS] is the same as g_startup_om except on a project basis. This optional parameter defaults to g_startup_om. Total_Startup_OM_Costs[t in TIMEPOINTS] is an expression for passing total startup O&M costs to the sys_cost module. -- Dispatch limits based on committed capacity -- g_min_load_fraction[g] describes the minimum loading level of a generation technology as a fraction of committed capacity. Many fossil plants - especially baseload - have a minimum run level which should be stored here. Note that this is only applied to committed capacity. This is an optional parameter that defaults to 1 for generation technologies marked baseload and 0 for all other generators. This parameter is only relevant when considering unit commitment so it is defined here rather than the gen_tech module. proj_min_cap_factor[(proj, t) in PROJ_DISPATCH_POINTS] describes the minimum loadding level for each project and timepoint as a fraction of committed capacity. This is an optional parameter that defaults to g_min_load_fraction, which in turn defaults to 0. You may wish to vary this by timepoint to establish minimum flow rates for hydropower, to specify thermal demand for a cogeneration project, or specify must-run reliability constraints in a geographically or temporally detailed model. This could also be used to constrain dispatch of distributed solar resources that cannot be curtailed by the system operator. DispatchLowerLimit[(proj, t) in PROJ_DISPATCH_POINTS] and DispatchUpperLimit[(proj, t) in PROJ_DISPATCH_POINTS] are expressions that define the lower and upper bounds of dispatch. Lower bounds are calculated as CommitProject * proj_min_cap_factor, and upper bounds are calculated relative to committed capacity and renewable resource availability. Enforce_Dispatch_Lower_Limit[(proj, t) in PROJ_DISPATCH_POINTS] and Enforce_Dispatch_Upper_Limit[(proj, t) in PROJ_DISPATCH_POINTS] are constraints that limit DispatchProj to the upper and lower bounds defined above. 
DispatchLowerLimit <= DispatchProj <= DispatchUpperLimit DispatchSlackUp[(proj, t) in PROJ_DISPATCH_POINTS] is an expression that describes the amount of additional commited capacity available for dispatch: DispatchUpperLimit - DispatchProj DispatchSlackDown[(proj, t) in PROJ_DISPATCH_POINTS] is an expression that describes the amount by which dispatch could be lowered, that is how much downramp potential each project has in each timepoint: DispatchProj - DispatchLowerLimit """ # Commitment decision, bounds and associated slack variables mod.CommitProject = Var( mod.PROJ_DISPATCH_POINTS, within=NonNegativeReals) mod.proj_max_commit_fraction = Param( mod.PROJ_DISPATCH_POINTS, within=PercentFraction, default=lambda m, proj, t: 1.0) mod.proj_min_commit_fraction = Param( mod.PROJ_DISPATCH_POINTS, within=PercentFraction, default=lambda m, proj, t: ( m.proj_max_commit_fraction[proj, t] if proj in m.BASELOAD_PROJECTS else 0.0)) mod.CommitLowerLimit = Expression( mod.PROJ_DISPATCH_POINTS, initialize=lambda m, proj, t: ( m.ProjCapacityTP[proj, t] * m.proj_availability[proj] * m.proj_min_commit_fraction[proj, t])) mod.CommitUpperLimit = Expression( mod.PROJ_DISPATCH_POINTS, initialize=lambda m, proj, t: ( m.ProjCapacityTP[proj, t] * m.proj_availability[proj] * m.proj_max_commit_fraction[proj, t])) mod.Enforce_Commit_Lower_Limit = Constraint( mod.PROJ_DISPATCH_POINTS, rule=lambda m, proj, t: ( m.CommitLowerLimit[proj, t] <= m.CommitProject[proj, t])) mod.Enforce_Commit_Upper_Limit = Constraint( mod.PROJ_DISPATCH_POINTS, rule=lambda m, proj, t: ( m.CommitProject[proj, t] <= m.CommitUpperLimit[proj, t])) mod.CommitSlackUp = Expression( mod.PROJ_DISPATCH_POINTS, initialize=lambda m, proj, t: ( m.CommitUpperLimit[proj, t] - m.CommitProject[proj, t])) mod.CommitSlackDown = Expression( mod.PROJ_DISPATCH_POINTS, initialize=lambda m, proj, t: ( m.CommitProject[proj, t] - m.CommitLowerLimit[proj, t])) # Startup & Shutdown mod.Startup = Var( mod.PROJ_DISPATCH_POINTS, within=NonNegativeReals) mod.Shutdown = Var( mod.PROJ_DISPATCH_POINTS, within=NonNegativeReals) mod.Commit_Startup_Shutdown_Consistency = Constraint( mod.PROJ_DISPATCH_POINTS, rule=lambda m, pr, t: ( m.CommitProject[pr, m.tp_previous[t]] + m.Startup[pr, t] - m.Shutdown[pr, t] == m.CommitProject[pr, t])) mod.g_startup_fuel = Param(mod.FUEL_BASED_GEN, default=0.0) mod.g_startup_om = Param(mod.GENERATION_TECHNOLOGIES, default=0.0) mod.proj_startup_fuel = Param( mod.FUEL_BASED_PROJECTS, default=lambda m, pr: m.g_startup_fuel[m.proj_gen_tech[pr]]) mod.proj_startup_om = Param( mod.PROJECTS, default=lambda m, pr: m.g_startup_om[m.proj_gen_tech[pr]]) # Startup costs need to be divided over the duration of the # timepoint because it is a one-time expenditure in units of $ # but cost_components_tp requires an hourly cost rate in $ / hr. mod.Total_Startup_OM_Costs = Expression( mod.TIMEPOINTS, initialize=lambda m, t: sum( m.proj_startup_om[proj] * m.Startup[proj, t] / m.tp_duration_hrs[t] for (proj, t2) in m.PROJ_DISPATCH_POINTS if t == t2)) mod.cost_components_tp.append('Total_Startup_OM_Costs') # Dispatch limits relative to committed capacity. 
mod.g_min_load_fraction = Param( mod.GENERATION_TECHNOLOGIES, within=PercentFraction, default=lambda m, g: 1.0 if m.g_is_baseload[g] else 0.0) mod.proj_min_load_fraction = Param( mod.PROJ_DISPATCH_POINTS, default=lambda m, pr, t: m.g_min_load_fraction[m.proj_gen_tech[pr]]) mod.DispatchLowerLimit = Expression( mod.PROJ_DISPATCH_POINTS, initialize=lambda m, pr, t: ( m.CommitProject[pr, t] * m.proj_min_load_fraction[pr, t])) def DispatchUpperLimit_expr(m, pr, t): if pr in m.VARIABLE_PROJECTS: return m.CommitProject[pr, t] * m.prj_max_capacity_factor[pr, t] else: return m.CommitProject[pr, t] mod.DispatchUpperLimit = Expression( mod.PROJ_DISPATCH_POINTS, initialize=DispatchUpperLimit_expr) mod.Enforce_Dispatch_Lower_Limit = Constraint( mod.PROJ_DISPATCH_POINTS, rule=lambda m, proj, t: ( m.DispatchLowerLimit[proj, t] <= m.DispatchProj[proj, t])) mod.Enforce_Dispatch_Upper_Limit = Constraint( mod.PROJ_DISPATCH_POINTS, rule=lambda m, proj, t: ( m.DispatchProj[proj, t] <= m.DispatchUpperLimit[proj, t])) mod.DispatchSlackUp = Expression( mod.PROJ_DISPATCH_POINTS, initialize=lambda m, proj, t: ( m.DispatchUpperLimit[proj, t] - m.DispatchProj[proj, t])) mod.DispatchSlackDown = Expression( mod.PROJ_DISPATCH_POINTS, initialize=lambda m, proj, t: ( m.DispatchProj[proj, t] - m.DispatchLowerLimit[proj, t]))
3,009
def adjustwithin(df, pCol, withinCols, method='holm'):
    """Apply multiplicity adjustment to a "stacked" pd.DataFrame, adjusting
    within groups defined by combinations of unique values in withinCols

    Parameters
    ----------
    df : pd.DataFrame
        Stacked DataFrame with one column of pvalues
        and other columns to define groups for adjustment.
    pCol : str
        Column containing pvalues.
    withinCols : list
        Columns used to define subgroups/families for adjustment.
    method : str
        An adjustment method for sm.stats.multipletests.
        Use 'holm' for Holm-Bonferroni FWER-adj and
        'fdr_bh' for Benjamini and Hochberg FDR-adj

    Returns
    -------
    adjSeries : pd.Series
        Same shape[0] as df containing adjusted pvalues/adjpvalues."""

    def _transformFunc(ser, method):
        nonNan = ~ser.isnull()
        if nonNan.sum() >= 1:
            rej, adjp, alphas, alphab = sm.stats.multipletests(ser.loc[nonNan].values, method=method)
            out = ser.copy(deep=True)
            out.loc[nonNan] = adjp
            return out
        else:
            return ser

    if not len(withinCols) == 0:
        gby = df[[pCol] + withinCols].groupby(withinCols)
        adjDf = gby.transform(partial(_transformFunc, method=method))
        # adjDf = df.drop(pCol, axis=1).join(adjDf)
    else:
        adjDf = pd.Series(adjustnonnan(df.loc[:, pCol], method=method),
                          index=df.index,
                          name='adjusted-pvalue')
    return adjDf
3,010
def parse_url_query_params(url, fragment=True):
    """Parse url query params

    :param fragment: bool: flag is used for parsing oauth url
    :param url: str: url string
    :return: dict
    """
    parsed_url = urlparse(url)
    if fragment:
        url_query = parse_qsl(parsed_url.fragment)
    else:
        url_query = parse_qsl(parsed_url.query)
    # login_response_url_query can have multiple keys
    url_query = dict(url_query)
    return url_query
3,011
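A short usage sketch for parse_url_query_params above, assuming urlparse and parse_qsl come from urllib.parse (as the names suggest); the URLs are made up:

oauth_redirect = "https://app.example.com/callback#access_token=abc123&state=xyz"
print(parse_url_query_params(oauth_redirect))
# {'access_token': 'abc123', 'state': 'xyz'}

plain_url = "https://app.example.com/search?q=python&page=2"
print(parse_url_query_params(plain_url, fragment=False))
# {'q': 'python', 'page': '2'}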
def sample_random_lightdirs(num_rays, num_samples, upper_only=False):
    """Randomly sample directions in the unit sphere.

    Args:
        num_rays: int or tensor shape dimension. Number of rays.
        num_samples: int or tensor shape dimension. Number of samples per ray.
        upper_only: bool. Whether to sample only on the upper hemisphere.

    Returns:
        lightdirs: [R, S, 3] float tensor. Random light directions sampled from
            the unit sphere for each sampled point.
    """
    if upper_only:
        min_z = 0
    else:
        min_z = -1

    phi = torch.rand(num_rays, num_samples) * (2 * math.pi)  # [R, S]
    cos_theta = torch.rand(num_rays, num_samples) * (1 - min_z) + min_z  # [R, S]
    theta = torch.acos(cos_theta)  # [R, S]

    x = torch.sin(theta) * torch.cos(phi)
    y = torch.sin(theta) * torch.sin(phi)
    z = torch.cos(theta)

    lightdirs = torch.cat((x[..., None], y[..., None], z[..., None]), dim=-1)  # [R, S, 3]

    return lightdirs
3,012
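A brief usage sketch for sample_random_lightdirs above (assumes torch and math are imported in the defining module; PyTorch is required):

dirs = sample_random_lightdirs(num_rays=4, num_samples=8, upper_only=True)
print(dirs.shape)                       # torch.Size([4, 8, 3])
print(bool((dirs[..., 2] >= 0).all()))  # True: upper hemisphere only
print(dirs.norm(dim=-1))                # each direction has (approximately) unit length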
def gaussgen(sigma):
    """
    Function to generate Gaussian kernels, in 1D, 2D and 3D.
    Source code in MATLAB obtained from Qiyuan Tian, Stanford University, September 2015

    :param sigma: Sigma for use in generating Gaussian kernel (see defaults in generate_FSL_structure_tensor)
    :return: Gaussian kernel with dimensions of sigma.
    """
    halfsize = np.ceil(3 * max(sigma))
    x = np.arange(-halfsize, halfsize + 1)
    dim = len(sigma)

    if dim == 1:
        x = x.astype(float)
        k = np.exp(-x ** 2 / (2 * sigma[0] ** 2))
    elif dim == 2:
        [X, Y] = np.meshgrid(x, x)
        X = X.astype(float)
        Y = Y.astype(float)
        k = np.exp(-X ** 2 / (2 * sigma[0] ** 2)) * np.exp(-Y ** 2 / (2 * sigma[1] ** 2))
    elif dim == 3:
        [X, Y, Z] = np.meshgrid(x, x, x)
        X = X.transpose(0, 2, 1)  # Obtained through vigorous testing (see below...)
        Y = Y.transpose(2, 0, 1)
        Z = Z.transpose(2, 1, 0)
        X = X.astype(float)
        Y = Y.astype(float)
        Z = Z.astype(float)
        k = np.exp(-X ** 2 / (2 * sigma[0] ** 2)) * np.exp(-Y ** 2 / (2 * sigma[1] ** 2)) * np.exp(
            -Z ** 2 / (2 * sigma[2] ** 2))
    else:
        raise ValueError('Only supports up to dimension 3')

    return np.divide(k, np.sum(np.abs(k)))
3,013
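A minimal usage sketch for the cleaned-up gaussgen above, assuming sigma is passed as a list (which is what the 2D/3D branches expect):

import numpy as np

k2 = gaussgen([1.0, 1.0])            # 2D kernel
print(k2.shape)                      # (7, 7): halfsize = ceil(3 * 1.0) = 3
print(round(float(np.sum(k2)), 6))   # ~1.0 after normalization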
def metrics_specs_from_keras( model_name: Text, model_loader: types.ModelLoader, ) -> List[config.MetricsSpec]: """Returns metrics specs for metrics and losses associated with the model.""" model = model_loader.construct_fn() if model is None: return [] metric_names = [] metrics = [] if hasattr(model, 'loss_functions'): # Legacy keras metrics separate the losses from the metrics and store them # under loss_functions. The first name in metric_names is always 'loss' # followed by the loss_function names (prefixed by output_name if multiple # outputs) and then followed by the metric names (also prefixed by output # name). Note that names in loss_functions will not have any output name # prefixes (if used) while the metrics will so we need to use the names in # metric_names for matching with outputs not the names in the functions. metric_names = model.metrics_names metrics.extend(model.loss_functions) metrics.extend(model.metrics) if len(metric_names) > len(metrics) and metric_names[0] == 'loss': metric_names = metric_names[1:] elif hasattr(model, 'compiled_loss') and hasattr(model, 'compiled_metrics'): # In the new keras metric setup the metrics include the losses (in the form # of a metric type not a loss type) and the metrics_names align with the # names in the metric classes. The metrics itself contains compiled_loss, # compiled_metrics, and custom metrics (added via add_metric). Since we only # care about compiled metrics we use these APIs instead. Note that the # overall loss metric is an average of the other losses which doesn't take # y_true, y_pred as inputs so it can't be calculated via standard inputs so # we remove it. metrics.extend(model.compiled_loss.metrics[1:]) metrics.extend(model.compiled_metrics.metrics) metric_names = [m.name for m in metrics] specs = [] # Need to check if model.output_names exists because the keras Sequential # model doesn't always contain output_names (b/150510258). if hasattr(model, 'output_names') and len(model.output_names) > 1: unmatched_metrics = {m for m in metrics} for output_name in model.output_names: per_output_metrics = [] for (name, metric) in zip(metric_names, metrics): if name.startswith(output_name + '_'): per_output_metrics.append(metric) unmatched_metrics.remove(metric) if per_output_metrics: specs.extend( metric_specs.specs_from_metrics( metrics=per_output_metrics, model_names=[model_name], output_names=[output_name], include_example_count=False, include_weighted_example_count=False)) metrics = list(unmatched_metrics) if metrics: specs.extend( metric_specs.specs_from_metrics( metrics=metrics, model_names=[model_name], include_example_count=False, include_weighted_example_count=False)) return specs
3,014
def __main__(recipe, params):
    """
    Main code: should only call recipe and params (defined from main)

    :param recipe:
    :param params:
    :return:
    """
    # ----------------------------------------------------------------------
    # Main Code
    # ----------------------------------------------------------------------
    # This is just a test
    if 'TEXT' in params['INPUTS']:
        if params['INPUTS']['TEXT'] not in ['None', None, '']:
            WLOG(params, '', params['INPUTS']['TEXT'])
    # ----------------------------------------------------------------------
    # End of main code
    # ----------------------------------------------------------------------
    return core.return_locals(params, locals())
3,015
def AICrss(n, k, rss):
    """Calculate the Akaike Information Criterion value, using:

    - n: number of observations
    - k: number of parameters
    - rss: residual sum of squares
    """
    return n * log((2 * pi) / n) + n + 2 + n * log(rss) + 2 * k
3,016
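A short usage sketch for AICrss above; only differences in AIC between models fitted to the same data are meaningful, so the example compares two hypothetical fits of 50 observations (the function's module is assumed to import log and pi from math, as the body implies):

aic_simple = AICrss(n=50, k=2, rss=12.0)   # hypothetical 2-parameter fit
aic_complex = AICrss(n=50, k=5, rss=11.5)  # hypothetical 5-parameter fit
print(aic_simple < aic_complex)            # True here: the extra parameters
                                           # do not reduce the RSS enough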
def CONVERT_OUT(s):
    """
    convert a directory of a module into the corresponding output directory;
    if s isn't beneath the module, raise NotInSelfModuleError
    Args:
        s : a relative directory beneath the module
    Returns:
        return the relative path of the corresponding output directory
    """
    if sys.argv[0] == 'PLANISH':
        return ""
    env = Environment.GetCurrent()
    _s = os.path.normpath(os.path.join(env.BrocDir(), s))
    # check whether _s is beneath the directory of the module
    if env.ModulePath() not in _s:
        raise NotInSelfModuleError(env.BrocDir(), _s)
    return os.path.normpath(os.path.join('broc_out', env.BrocCVSDir(), s))
3,017
def random_traveling_salesman(points, distmat, avg_edges=None, start=None,
                              max_perm_samples=2e3, end=None, debug=0):
    """
    Approximates the shortest route that visits all the cities. An exhaustive
    search is O(N!), so never use that on long lists; instead we draw up to
    max_perm_samples (default=2e3) random samples of the permutation space of
    all possible routes and then select the route with the minimal overall
    route distance.

    Args:
        points, distmat (np.matrix): the stops to visit and the matrix of
            distances between all stops in the field of interest.
        start=None, max_perm_samples=2e3, end=None, debug=0

    Returns:
        path (np.array): ordered points optimized according to distmat
    """
    if start is None:
        start = points[0]
    npoints = len(points)
    if avg_edges is None:
        nnodes = distmat.shape[0]
        nedges = sum([(~np.isinf(distmat[k, k+1:])).sum() for k in range(nnodes)])
        avg_edges = int(nedges / nnodes) + 1

    # attempt to estimate the number of possible routes given the average
    # number of edges per node
    nroutes_test = min(int(max_perm_samples), avg_edges**npoints)
    if debug:
        print(f'drawing {nroutes_test} random routes to test')

    # construct a limited set of random permutations
    if not isinstance(points, np.ndarray):
        points = np.asarray(points)
    else:
        points = points.copy()
    this_perm = points
    # permutes = []
    best_permute = None
    nvalid_found = 0
    best = np.inf
    while nvalid_found < nroutes_test:  # len(best_permute) < nroutes_test:
        np.random.shuffle(this_perm)
        if this_perm[0] == start:
            nvalid_found += 1
            # permutes.append(this_perm.copy())
            length = total_distance(this_perm, distmat)
            if length < best:
                best = length
                best_permute = this_perm.copy()

    # total_dist = np.zeros(len(permutes))
    # if debug:
    #     print(total_dist)
    # for pidx, perm in enumerate(permutes):
    #     total_dist[pidx] = total_distance(perm, distmat)
    # path = permutes[np.argsort(total_dist)[0]]
    path = best_permute
    if end is not None:
        path = path.tolist()
        path.append(end)
        return np.asarray(path)
    else:
        return path
3,018
def get_username_from_access_token(token: str, secret_key: str, algorithm: str) -> Optional[str]:
    """
    Decodes a token and returns the "sub" (= username) of the decoded token

    :param token: JWT access token
    :param secret_key: The secret key that should be used for token decoding
    :param algorithm: The algorithm that should be used for token decoding (like HS256)
    :return: Username
    """
    try:
        payload = jwt.decode(token, secret_key, algorithms=[algorithm])
        username: str = payload.get("sub")
        if not username:
            raise credentials_exception
        return username
    except (JWTError, ExpiredSignatureError, JWTClaimsError):
        raise credentials_exception
3,019
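A minimal round-trip sketch for get_username_from_access_token above, assuming the surrounding module uses python-jose (from jose import jwt) and defines credentials_exception; the key and claim values are made up for illustration:

from jose import jwt

secret = "change-me"   # illustrative key, not from the source
token = jwt.encode({"sub": "alice"}, secret, algorithm="HS256")
assert get_username_from_access_token(token, secret, "HS256") == "alice"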
def _inject_getter_attrs( metaself, objname, attrs, configurable_attrs, depc_name=None, depcache_attrs=None, settable_attrs=None, aliased_attrs=None, ): """ Used by the metaclass to inject methods and properties into the class inheriting from ObjectList1D """ if settable_attrs is None: settable_attrs = [] settable_attrs = set(settable_attrs) # Inform the class of which variables will be injected metaself._settable_attrs = settable_attrs metaself._attrs = attrs metaself._configurable_attrs = configurable_attrs if depcache_attrs is None: metaself._depcache_attrs = [] else: metaself._depcache_attrs = ['%s_%s' % (tbl, col) for tbl, col in depcache_attrs] if aliased_attrs is not None: metaself._attrs_aliases = aliased_attrs else: metaself._attrs_aliases = {} # if not getattr(metaself, '__needs_inject__', True): # return attr_to_aliases = ut.invert_dict(metaself._attrs_aliases, unique_vals=False) # What is difference between configurable and depcache getters? # Could depcache getters just be made configurable? # I guess its just an efficincy thing. Actually its config2_-vs-config # FIXME: rectify differences between normal / configurable / depcache # getter def _make_caching_setter(attrname, _rowid_setter): def _setter(self, values, *args, **kwargs): if self._ibs is None: self._internal_attrs[attrname] = values else: if self._caching and attrname in self._internal_attrs: self._internal_attrs[attrname] = values _rowid_setter(self, self._rowids, values) ut.set_funcname(_setter, '_set_' + attrname) return _setter def _make_caching_getter(attrname, _rowid_getter): def _getter(self): if self._ibs is None or (self._caching and attrname in self._internal_attrs): data = self._internal_attrs[attrname] else: data = _rowid_getter(self, self._rowids) if self._caching: self._internal_attrs[attrname] = data return data ut.set_funcname(_getter, '_get_' + attrname) return _getter # make default version use implicit rowids and another # that takes explicit rowids. 
def _make_setters(objname, attrname): ibs_funcname = 'set_%s_%s' % (objname, attrname) def _rowid_setter(self, rowids, values, *args, **kwargs): ibs_callable = getattr(self._ibs, ibs_funcname) ibs_callable(rowids, values, *args, **kwargs) ut.set_funcname(_rowid_setter, '_rowid_set_' + attrname) _setter = _make_caching_setter(attrname, _rowid_setter) return _rowid_setter, _setter # --- def _make_getters(objname, attrname): ibs_funcname = 'get_%s_%s' % (objname, attrname) def _rowid_getter(self, rowids): ibs_callable = getattr(self._ibs, ibs_funcname) data = ibs_callable(rowids) if self._asarray: data = np.array(data) return data ut.set_funcname(_rowid_getter, '_rowid_get_' + attrname) _getter = _make_caching_getter(attrname, _rowid_getter) return _rowid_getter, _getter def _make_cfg_getters(objname, attrname): ibs_funcname = 'get_%s_%s' % (objname, attrname) def _rowid_getter(self, rowids): ibs_callable = getattr(self._ibs, ibs_funcname) data = ibs_callable(rowids, config2_=self._config) if self._asarray: data = np.array(data) return data ut.set_funcname(_rowid_getter, '_rowid_get_' + attrname) _getter = _make_caching_getter(attrname, _rowid_getter) return _rowid_getter, _getter def _make_depc_getters(depc_name, attrname, tbl, col): def _rowid_getter(self, rowids): depc = getattr(self._ibs, depc_name) data = depc.get(tbl, rowids, col, config=self._config) if self._asarray: data = np.array(data) return data ut.set_funcname(_rowid_getter, '_rowid_get_' + attrname) _getter = _make_caching_getter(attrname, _rowid_getter) return _rowid_getter, _getter # Collect setter / getter functions and properties rowid_getters = [] getters = [] setters = [] properties = [] for attrname in attrs: _rowid_getter, _getter = _make_getters(objname, attrname) if attrname in settable_attrs: _rowid_setter, _setter = _make_setters(objname, attrname) setters.append(_setter) else: _setter = None prop = property(fget=_getter, fset=_setter) rowid_getters.append((attrname, _rowid_getter)) getters.append(_getter) properties.append((attrname, prop)) for attrname in configurable_attrs: _rowid_getter, _getter = _make_cfg_getters(objname, attrname) prop = property(fget=_getter) rowid_getters.append((attrname, _rowid_getter)) getters.append(_getter) properties.append((attrname, prop)) if depcache_attrs is not None: for tbl, col in depcache_attrs: attrname = '%s_%s' % (tbl, col) _rowid_getter, _getter = _make_depc_getters(depc_name, attrname, tbl, col) prop = property(fget=_getter, fset=None) rowid_getters.append((attrname, _rowid_getter)) getters.append(_getter) properties.append((attrname, prop)) aliases = [] # Inject all gathered information for attrname, func in rowid_getters: funcname = ut.get_funcname(func) setattr(metaself, funcname, func) # ensure aliases have rowid getters for alias in attr_to_aliases.get(attrname, []): alias_funcname = '_rowid_get_' + alias setattr(metaself, alias_funcname, func) for func in getters: funcname = ut.get_funcname(func) setattr(metaself, funcname, func) for func in setters: funcname = ut.get_funcname(func) setattr(metaself, funcname, func) for attrname, prop in properties: setattr(metaself, attrname, prop) for alias in attr_to_aliases.pop(attrname, []): aliases.append((alias, attrname)) setattr(metaself, alias, prop) if ut.get_argflag('--autogen-core'): # TODO: turn on autogenertion given a flag def expand_closure_source(funcname, func): source = ut.get_func_sourcecode(func) closure_vars = [ (k, v.cell_contents) for k, v in zip(func.func_code.co_freevars, func.func_closure) ] source = 
ut.unindent(source) import re for k, v in closure_vars: source = re.sub('\\b' + k + '\\b', ut.repr2(v), source) source = re.sub(r'def .*\(self', 'def ' + funcname + '(self', source) source = ut.indent(source.strip(), ' ') + '\n' return source explicit_lines = [] # build explicit version for jedi? for funcname, func in getters: source = expand_closure_source(funcname, func) explicit_lines.append(source) # build explicit version for jedi? for funcname, func in setters: source = expand_closure_source(funcname, func) explicit_lines.append(source) for attrname, prop in properties: getter_name = None if prop.fget is None else ut.get_funcname(prop.fget) setter_name = None if prop.fset is None else ut.get_funcname(prop.fset) source = ' %s = property(%s, %s)' % (attrname, getter_name, setter_name) explicit_lines.append(source) for alias, attrname in aliases: source = ' %s = %s' % (alias, attrname) explicit_lines.append(source) explicit_source = ( '\n'.join( [ 'from wbia import _wbia_object', '', '', 'class _%s_base_class(_wbia_object.ObjectList1D):', ' __needs_inject__ = False', '', ] ) % (objname,) ) explicit_source += '\n'.join(explicit_lines) explicit_fname = '_autogen_%s_base.py' % (objname,) from os.path import dirname, join ut.writeto(join(dirname(__file__), explicit_fname), explicit_source + '\n') if attr_to_aliases: raise AssertionError('Unmapped aliases %r' % (attr_to_aliases,))
3,020
def date_handler(obj):
    """make datetime object json serializable.

    Notes
    -----
    Taken from here: https://tinyurl.com/yd84fqlw
    """
    if hasattr(obj, 'isoformat'):
        return obj.isoformat()
    else:
        raise TypeError
3,021
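A small usage sketch for date_handler above, passing it as the default hook of json.dumps (standard library only):

import datetime
import json

payload = {"created": datetime.datetime(2021, 6, 1, 12, 30)}
print(json.dumps(payload, default=date_handler))
# {"created": "2021-06-01T12:30:00"}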
def add_token_price(token, _method, _id, price):
    """
    Adds a new token on coinprices.json
    """
    token = token.lower()

    with open(coinprices_path) as f:
        coinprices = json.load(f)

    coinprices[token] = {}
    coinprices[token]['method'] = _method
    coinprices[token]['id'] = _id
    coinprices[token]['price'] = price

    with open(coinprices_path, 'w', encoding='utf-8') as f:
        json.dump(coinprices, f, ensure_ascii=False, indent=4)
3,022
def has_type(typestmt, names):
    """Return type with name if `type` has name as one of its base types,
    and name is in the `names` list. Otherwise, return None."""
    if typestmt.arg in names:
        return typestmt
    for t in typestmt.search('type'):  # check all union's member types
        r = has_type(t, names)
        if r is not None:
            return r
    typedef = getattr(typestmt, 'i_typedef', None)
    if typedef is not None and getattr(typedef, 'i_is_circular', None) is False:
        t = typedef.search_one('type')
        if t is not None:
            return has_type(t, names)
    return None
3,023
def generic_ecsv(file_name, column_mapping=None, **kwargs):
    """
    Read a spectrum from an ECSV file, using generic_spectrum_from_table_loader()
    to try to figure out which column is which.
    The ECSV columns must have units, as `generic_spectrum_from_table_loader`
    depends on this to determine the meaning of the columns.  For manual
    control over the column to spectrum mapping, use the ASCII loader.

    Parameters
    ----------
    file_name: str
        The path to the ECSV file.
    column_mapping : dict
        A dictionary describing the relation between the ECSV file columns
        and the arguments of the `Spectrum1D` class, along with unit
        information. The dictionary keys should be the ECSV file column names
        while the values should be a two-tuple where the first element is the
        associated `Spectrum1D` keyword argument, and the second element is the
        unit for the ECSV file column::

            column_mapping = {'FLUX': ('flux', 'Jy')}

    Returns
    -------
    data: Spectrum1D
        The spectrum that is represented by the data in this table.
    """
    table = Table.read(file_name, format='ascii.ecsv')
    if column_mapping is None:
        return generic_spectrum_from_table(table, **kwargs)

    return spectrum_from_column_mapping(table, column_mapping)
3,024
def parse_valuation_line(s, encoding=None):
    """
    Parse a line in a valuation file.

    Lines are expected to be of the form::

      noosa => n
      girl => {g1, g2}
      chase => {(b1, g1), (b2, g1), (g1, d1), (g2, d2)}

    :param s: input line
    :type s: str
    :param encoding: the encoding of the input string, if it is binary
    :type encoding: str
    :return: a pair (symbol, value)
    :rtype: tuple
    """
    if encoding is not None:
        s = s.decode(encoding)
    pieces = _VAL_SPLIT_RE.split(s)
    symbol = pieces[0]
    value = pieces[1]
    # check whether the value is meant to be a set
    if value.startswith('{'):
        value = value[1:-1]
        tuple_strings = _TUPLES_RE.findall(value)
        # are the set elements tuples?
        if tuple_strings:
            set_elements = []
            for ts in tuple_strings:
                ts = ts[1:-1]
                element = tuple(_ELEMENT_SPLIT_RE.split(ts))
                set_elements.append(element)
        else:
            set_elements = _ELEMENT_SPLIT_RE.split(value)
        value = set(set_elements)
    return symbol, value
3,025
def scrape_dailykos(keywords=KEYWORDS):
    """
    Scrapes news article titles from dailykos.com
    """
    dk_request = requests.get('https://www.dailykos.com')
    dk_homepage = dk_request.content
    dk_soup = BeautifulSoup(dk_homepage, 'html.parser')
    dk_tags = dk_soup.find_all('div', class_='cell-wrapper')
    dk_links = ['https://www.dailykos.com' + tag.find('a')['href'] for tag in dk_tags]
    dk_links = [link for link in dk_links if any(keyword in link for keyword in keywords)]

    # get article titles and dates
    dk_titles = []
    dk_dates = []
    for link in dk_links:
        # prep article content
        article = requests.get(link)
        article_content = article.content
        soup_article = BeautifulSoup(article_content, 'html5lib')

        # get article title
        dk_titles.append(soup_article.find('title').get_text())

        # get publication date
        date = str(soup_article.find('span', class_='timestamp'))
        dk_dates.append(date[len(date) - 21:-7])

    # format dates
    dk_dates = [datetime.datetime.strptime(date, '%B %d, %Y').strftime('%Y-%m-%d') for date in dk_dates]

    # assembling data
    dailykos_data = pd.DataFrame.from_dict({
        'publisher': 'dailykos',
        'date': dk_dates,
        'link': dk_links,
        'article_title': dk_titles
    })
    dailykos_data.drop_duplicates(inplace=True)

    return dailykos_data
3,026
def write_json(file_path, data, do_clear=False):
    """Write `data` to the JSON file specified by `file_path`, optionally
    clearing the file before adding `data`

    Parameters
    ----------
    file_path: String
        The target .json file path to which `data` will be written
    data: Object
        The content to save at the .json file given by `file_path`
    do_clear: Boolean, default=False
        If True, the contents of the file at `file_path` will be cleared
        before saving `data`"""
    if do_clear is True:
        clear_file(file_path)

    with open(file_path, "w") as f:
        json.dump(data, f, default=default_json_write)
3,027
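A brief usage sketch for write_json above; the helpers clear_file and default_json_write are assumed to live in the same module, and the path and payload are illustrative:

record = {"experiment": "demo", "score": 0.92}
write_json("results.json", record)                  # plain write
write_json("results.json", record, do_clear=True)   # clear first, then write

Since the file is opened with mode "w", which already truncates it, do_clear mainly matters if clear_file performs extra bookkeeping beyond emptying the file.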
def test_output_result_01(fixture_main_detection_01, monkeypatch): """output_result emits files (plot is excluded from test)""" args = fixture_main_detection_01["args"] log = pd.DataFrame( [ [pd.Timestamp("2021-06-01 01:00:00"), "1.1.1.1", 1, 11], [pd.Timestamp("2021-06-01 02:00:00"), "2.2.2.2", 2, 22], [pd.Timestamp("2021-06-01 03:00:00"), "3.3.3.3", 3, 33], ], columns=["datetime_rounded", "src_ip", "column_1", "column_2"], ).set_index(["datetime_rounded", "src_ip"]) label = pd.Series() dir_report = fixture_main_detection_01["report_dir"] x_train_labeled_embeddings = None x_test_embeddings = None idx_anomaly = [2, 3, 4] shap_value_idx_sorted = pd.DataFrame( [ [pd.Timestamp("2021-06-01 01:00:00"), "1.1.1.1", 1], [pd.Timestamp("2021-06-01 02:00:00"), "2.2.2.2", 2], [pd.Timestamp("2021-06-01 03:00:00"), "3.3.3.3", 3], ], columns=["datetime_rounded", "src_ip", "column_1"], ).set_index(["datetime_rounded", "src_ip"]) anomaly_score_sorted = shap_value_idx_sorted["column_1"] stats = {} previous_config = FileStorageConfig( base=FileStorageBaseConfig( dir=os.path.join(fixture_main_detection_01["parent_dir"], "write_log") ), load=FileLoader.Config(), save=FileSaver.Config(compression=True, all=False), ) os.makedirs(dir_report, exist_ok=True) # skip test for plot_detection monkeypatch.setattr( "psykoda.io.reporting.plot.plot_detection", lambda *args, **kwargs: None ) expected_name_anomaly = [ (pd.Timestamp("2021-06-01 01:00:00"), "1.1.1.1"), (pd.Timestamp("2021-06-01 02:00:00"), "2.2.2.2"), (pd.Timestamp("2021-06-01 03:00:00"), "3.3.3.3"), ] actual_ret = internal.output_result( args=args, log=log, label=label, dir_report=dir_report, x_train_labeled_embeddings=x_train_labeled_embeddings, x_test_embeddings=x_test_embeddings, idx_anomaly=idx_anomaly, shap_value_idx_sorted=shap_value_idx_sorted, anomaly_score_sorted=anomaly_score_sorted, stats=stats, previous_config=previous_config, ) try: assert os.path.isfile(os.path.join(dir_report, internal.FILENAME_REPORT)) assert os.path.isdir(previous_config.base.dir) for dt, src_ip in shap_value_idx_sorted.index: assert os.path.isfile( os.path.join( previous_config.base.dir, dt.strftime(f"%Y-%m-%d-%H__{src_ip}.zip") ) ) assert actual_ret["num_anomaly"] == 3 assert actual_ret["name_anomaly"] == expected_name_anomaly finally: if os.path.isdir(previous_config.base.dir): shutil.rmtree(previous_config.base.dir)
3,028
def parse_rows(m: utils.Matrix[str]) -> pd.DataFrame:
    """Parse rows to DataFrame, expecting specific columns and types."""
    if len(m) < 2:
        logger.error('More than one line expected in {}'.format(str(m)))
        return pd.DataFrame()

    # parse data rows and add type casting
    cols = len(m[0])
    df = pd.DataFrame([row for row in m[1:] if len(row) == cols], columns=m[0])
    pairs = (('Market Value', utils.str_to_float),
             ('Weight (%)', utils.str_to_float),
             ('Notional Value', utils.str_to_float),
             ('Shares', utils.str_to_int),
             ('Price', utils.str_to_float),
             ('FX Rate', utils.str_to_float),
             ('Accrual Date', utils.parse_date_name))
    for col, f in pairs:
        try:
            df[col] = df[col].apply(f)
        except Exception as e:
            logger.error('Error when casting {}: {}'.format(col, e))

    return df
3,029
def tiny_id(page_id):
    """Return *tiny link* ID for the given page ID."""
    return base64.b64encode(struct.pack('<L', int(page_id)).rstrip(b'\0'),
                            altchars=b'_-').rstrip(b'=').decode('ascii')
3,030
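A quick usage sketch for tiny_id above (base64 and struct are assumed to be imported in the defining module); the page IDs are arbitrary example values:

print(tiny_id(123456))   # 'QOIB' -- a short, URL-safe token
print(tiny_id(1))        # 'AQ'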
def test_smooth_goddard_2013(PM_da_control_3d_full):
    """Test whether Goddard 2013 recommendations are fulfilled by
    smooth_Goddard_2013."""
    da = PM_da_control_3d_full
    actual = smooth_goddard_2013(
        da,
    )
    # test that x, y not in dims
    assert "x" not in actual.dims
    assert "y" not in actual.dims
    # tests whether nlat, nlon got reduced
    assert actual.time.size < da.time.size
    assert actual.lon.size < da.lon.size
    assert actual.lat.size < da.lat.size
3,031
def invert_color(color: str, *, black_or_white: bool = False) -> str:
    """Return a color with opposite red, green and blue values.

    Example: ``invert_color('white')`` is ``'#000000'`` (black).

    This function uses tkinter for converting the color to RGB. That's
    why a tkinter root window must have been created, but *color* can be
    any Tk-compatible color string, like a color name or a ``'#rrggbb'``
    string.

    The return value is always a ``'#rrggbb'`` string (also compatible
    with Tk).

    If ``black_or_white=True`` is set, then the result is always
    ``"#000000"`` (black) or ``"#ffffff"`` (white), depending on whether
    the color is bright or dark.
    """
    if black_or_white:
        return "#000000" if is_bright(color) else "#ffffff"

    widget = porcupine.get_main_window()  # any widget would do

    # tkinter uses 16-bit colors, convert them to 8-bit
    r, g, b = (value >> 8 for value in widget.winfo_rgb(color))
    return "#%02x%02x%02x" % (0xFF - r, 0xFF - g, 0xFF - b)
3,032
def pcaImageCube(ref, mask=None, pcNum=None, cube=True, ref3D=True, outputEval=False):
    """Principal Component Analysis,
    Input:
        ref: Cube of references, 3D; if ref3D==False, 2D (flattened and normalized,
            with masked region excluded.)
        mask: mask, 2D or 1D;
        pcNum: how many principal components are needed;
        cube: output as a cube? Otherwise a flattened 2D component array will be returned.
        ref3D: True by default.
        outputEval: whether to return the eigen values, False by default.
    Output:
        The principal components, either cube (3D) or flattened (2D)."""
    if mask is None:
        mask = np.ones(ref[0].shape)
    if pcNum is None:
        pcNum = ref.shape[0]
    if ref3D:
        mask_flat = mask.flatten()
        ref_flat = np.zeros((ref.shape[0], np.where(mask_flat == 1)[0].shape[0]))
        for i in range(ref_flat.shape[0]):
            ref_flat[i], std = flattenAndNormalize(ref[i], mask)
    else:
        ref_flat = ref
        if np.shape(mask.shape)[0] == 1:    # 1D mask, already flattened
            mask_flat = mask
        elif np.shape(mask.shape)[0] == 2:  # 2D mask, need flatten
            mask_flat = mask.flatten()

    covMatrix = np.dot(ref_flat, np.transpose(ref_flat))
    eVal, eVec = np.linalg.eig(covMatrix)
    index = (-eVal).argsort()[:pcNum]
    eVec = eVec[:, index]
    components_flatten = np.dot(np.transpose(eVec), ref_flat)
    pc_flat = np.zeros((pcNum, mask_flat.shape[0]))
    for i in range(pc_flat.shape[0]):
        pc_flat[i][np.where(mask_flat == 1)] = components_flatten[i] / np.sqrt(
            np.dot(components_flatten[i], np.transpose(components_flatten[i])))
    if cube == False:
        return pc_flat

    pc_cube = np.zeros((pcNum, mask.shape[0], mask.shape[1]))
    width = mask.shape[0]
    for i in range(pc_flat.shape[0]):
        pc_cube[i] = np.array(np.split(pc_flat[i], width))
    if not outputEval:
        return pc_cube
    else:
        return pc_cube, eVal[index]
3,033
def get_cross_kerr_table(epr, swp_variable, numeric):
    """
    Function to re-organize the cross-Kerr results once the quantum analysis is finished

    Parameters:
    -------------------
    epr          : Object of QuantumAnalysis class
    swp_variable : the variable swept in data according to which things will be sorted
    numeric      : Whether numerical diagonalization of the data was performed

    Use notes:
    -------------------
    * It is assumed the epr.analyze_all_variations has already been called and analysis is finished.
    """
    if numeric:
        f1 = epr.results.get_frequencies_ND(vs=swp_variable)
        chis = epr.get_chis(numeric=numeric, swp_variable=swp_variable)
    else:
        f1 = epr.results.get_frequencies_O1(vs=swp_variable)
        chis = epr.get_chis(numeric=numeric, swp_variable=swp_variable)

    #print(f1)
    #print(chis)

    swp_indices = chis.index.levels[0]
    mode_indices = chis.index.levels[1]

    #print(mode_indices)

    mode_combinations = list(zip(mode_indices, mode_indices))
    diff_mode_combinations = list(it.combinations_with_replacement(mode_indices, 2))
    mode_combinations.extend(diff_mode_combinations)

    organized_data = pd.DataFrame({swp_variable: swp_indices})
    organized_data.set_index(swp_variable, inplace=True)

    for mode_indx in mode_indices:
        organized_data['f_' + str(mode_indx) + '(GHz)'] = np.round(f1.loc[mode_indx].values / 1000, 3)

    for combo_indx in mode_combinations:
        temp_chi_list = [chis.loc[swp_indx].loc[combo_indx] for swp_indx in swp_indices]
        organized_data['chi_' + str(combo_indx[0]) + str(combo_indx[1]) + ' (MHz)'] = np.round(temp_chi_list, 4)

    return organized_data
3,034
def getSpectra(dataframe, indices):
    """
    Returns the files for training and testing

    Inputs
    -----------
    dataframe: pd.DataFrame object from which we need to get spectra
    indices: row values for which we need the spectra

    Returns
    -----------
    spec_vals: pd.DataFrame object containing spectra values for given indices
    """
    colList = dataframe.columns
    spec_inds = [index for index in range(len(colList))
                 if colList[index].startswith('Spectrum_')]
    spec_cols = colList[spec_inds]
    spec_vals = dataframe[spec_cols].iloc[indices]

    return spec_vals
3,035
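A tiny usage sketch for getSpectra above with a made-up DataFrame; the column names follow the 'Spectrum_' prefix the function looks for:

import pandas as pd

df = pd.DataFrame({
    "sample_id": [101, 102, 103],
    "Spectrum_400nm": [0.12, 0.15, 0.11],
    "Spectrum_500nm": [0.34, 0.31, 0.36],
})
print(getSpectra(df, [0, 2]))   # spectrum columns for rows 0 and 2 only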
def config2():
    """Configure for one of the restart tests."""
    return Config.load(f"""
    id: cbc_binary_toolkit
    version: 0.0.1
    database:
      _provider: tests.component.persistor_fixtures.mock_persistor.MockPersistorFactory
    engine:
      _provider: tests.component.engine_fixtures.mock_engine.MockLocalEngineFactory
      name: {ENGINE_NAME}
      feed_id: {FEED_ID}
      type: local
      Test: TestPassed
    """)
3,036
async def test_hmip_light(hass, default_mock_hap):
    """Test HomematicipLight."""
    entity_id = "light.treppe"
    entity_name = "Treppe"
    device_model = "HmIP-BSL"

    ha_entity, hmip_device = get_and_check_entity_basics(
        hass, default_mock_hap, entity_id, entity_name, device_model
    )

    assert ha_entity.state == "on"
    service_call_counter = len(hmip_device.mock_calls)

    await hass.services.async_call(
        "light", "turn_off", {"entity_id": entity_id}, blocking=True
    )
    assert len(hmip_device.mock_calls) == service_call_counter + 1
    assert hmip_device.mock_calls[-1][0] == "turn_off"
    await async_manipulate_test_data(hass, hmip_device, "on", False)
    ha_entity = hass.states.get(entity_id)
    assert ha_entity.state == "off"

    await hass.services.async_call(
        "light", "turn_on", {"entity_id": entity_id}, blocking=True
    )
    assert len(hmip_device.mock_calls) == service_call_counter + 3
    assert hmip_device.mock_calls[-1][0] == "turn_on"
    await async_manipulate_test_data(hass, hmip_device, "on", True)
    ha_entity = hass.states.get(entity_id)
    assert ha_entity.state == "on"
3,037
def _on_process(*args, **kwargs):
    """Process the given function in the current subprocess"""
    try:
        func = kwargs['__func__']
        del kwargs['__func__']
        return func(*args, **kwargs)
    except KeyboardInterrupt:
        sys.exit()
    except Exception as e:
        raise type(e)(traceback.format_exc())
3,038
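A hedged usage sketch for _on_process above: the wrapper expects the real callable under the reserved '__func__' keyword, so a pool submission might look like this (the pool setup and the work function are illustrative, not from the source):

import multiprocessing

def work(x, scale=1):
    return x * scale

if __name__ == "__main__":
    with multiprocessing.Pool(2) as pool:
        result = pool.apply(_on_process, args=(10,), kwds={"__func__": work, "scale": 3})
    print(result)   # 30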
def test_make_psfs_overwriteFalse(L_radec):
    """ test that when overwrite=False make_psf() does not update file """
    L = L_radec
    overwrite = False

    for band in L.bands:
        file = dir_obj + 'psf-{0}.fits'.format(band)
        if not os.path.isdir(dir_obj):
            os.makedirs(dir_obj)
        if os.path.isfile(file):
            os.remove(file)
        open(file, 'w').close()
        assert os.stat(file).st_size == 0

    # when file exists it should not update file
    L.make_psfs(overwrite=overwrite)

    for band in L.bands:
        file = dir_obj + 'psf-{0}.fits'.format(band)
        assert os.stat(file).st_size == 0
3,039
def diff_cases(couch_cases, log_cases=False):
    """Diff cases and return diff data

    :param couch_cases: dict `{<case_id>: <case_json>, ...}`
    :returns: `DiffData`
    """
    assert isinstance(couch_cases, dict), repr(couch_cases)[:100]
    assert "_diff_state" in globals()
    data = DiffData()
    dd_count = partial(metrics_counter, tags={"domain": get_domain()})
    case_ids = list(couch_cases)
    sql_case_ids = set()
    for sql_case in CaseAccessorSQL.get_cases(case_ids):
        case_id = sql_case.case_id
        sql_case_ids.add(case_id)
        couch_case, diffs, changes = diff_case(sql_case, couch_cases[case_id], dd_count)
        if diffs:
            dd_count("commcare.couchsqlmigration.case.has_diff")
        if changes:
            dd_count("commcare.couchsqlmigration.case.did_change")
        data.doc_ids.append(case_id)
        data.diffs.append((couch_case['doc_type'], case_id, diffs))
        data.changes.append((couch_case['doc_type'], case_id, changes))
        if log_cases:
            log.info("case %s -> %s diffs", case_id, len(diffs))

    diffs, changes = diff_ledgers(case_ids, dd_count)
    data.diffs.extend(diffs)
    data.changes.extend(changes)
    add_missing_docs(data, couch_cases, sql_case_ids, dd_count)
    return data
3,040
def rk4(a, b, x0, y0, nu=0, F=0, xdot = x_dot, ydot = y_dot):
    """rk4(a, b, x0, y0, nu=0, F=0, xdot = x_dot, ydot = y_dot)

    Args:
        a (float) : Lower bound, t = a*2*pi
        b (float) : Upper bound, t = b*2*pi
        x0 (float) : Initial position of ball
        y0 (float) : Initial velocity of ball
        nu (float) : Constant damping coefficient
        F (float) : Constant force amplitude coefficient
        xdot (function) : Part of the differential equation
        ydot (function) : Part of the differential equation

    Returns:
        t (array) : Array over the time interval with equal dt = .001
        x (array) : Array containing the position of the ball at each time in the time array
        y (array) : Array containing the velocity of the ball at each time in the time array
    """
    dt = 0.001
    start = 2*a*np.pi
    end = 2*b*np.pi
    n = int(np.ceil((end-start)/dt))
    t = np.linspace(start, end, n)
    x = np.zeros(n)
    y = np.zeros(n)
    x_dot_vec = np.zeros(n)
    y_dot_vec = np.zeros(n)
    x[0] = x0
    y[0] = y0
    for k in range(n):
        # evaluate the supplied derivative functions at the current step
        x_dot_vec[k] = xdot(y[k])
        y_dot_vec[k] = ydot(t[k], y[k], x[k], nu, F)
        if k == n-1:
            break
        else:
            k1y = dt*ydot(t[k], y[k], x[k], nu, F)
            k2y = dt*ydot((t[k]+dt/2), (y[k]+k1y/2), x[k], nu, F)
            k3y = dt*ydot((t[k]+dt/2), (y[k]+k2y/2), x[k], nu, F)
            k4y = dt*ydot((t[k]+dt), (y[k]+k3y), x[k], nu, F)
            rky = (k1y+(2*k2y)+(2*k3y)+k4y)/6
            y[k+1] = y[k]+rky
            k1x = dt*xdot(y[k])
            k2x = dt*xdot(y[k]+k1x/2)
            k3x = dt*xdot(y[k]+k2x/2)
            k4x = dt*xdot(y[k]+k3x)
            rkx = (k1x+(2*k2x)+(2*k3x)+k4x)/6
            x[k+1] = x[k]+rkx
    return (t, x, y)
3,041
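A minimal usage sketch for the `rk4` integrator above. The module-level `x_dot`/`y_dot` it defaults to are not shown in this entry, so simple undamped-oscillator derivatives are assumed here and passed in explicitly.

import numpy as np

# Hypothetical stand-ins for the missing x_dot / y_dot module-level functions:
# they model dx/dt = v and dv/dt = -x - nu*v + F*cos(t).
def x_dot(y):
    return y

def y_dot(t, y, x, nu, F):
    return -x - nu * y + F * np.cos(t)

t, x, y = rk4(0, 1, x0=1.0, y0=0.0, nu=0.0, F=0.0, xdot=x_dot, ydot=y_dot)
print(x[0], round(x[-1], 3))  # starts at 1.0 and comes back near 1.0 after one period (t = 2*pi)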
def decode_callbacks(encoded_callbacks): """Decode the callbacks to an executable form.""" from furious.async import Async callbacks = {} for event, callback in encoded_callbacks.iteritems(): if isinstance(callback, dict): async_type = Async if '_type' in callback: async_type = path_to_reference(callback['_type']) callback = async_type.from_dict(callback) else: callback = path_to_reference(callback) callbacks[event] = callback return callbacks
3,042
def create_conv_block( use_depthwise, kernel_size, padding, stride, layer_name, conv_hyperparams, is_training, freeze_batchnorm, depth): """Create Keras layers for depthwise & non-depthwise convolutions. Args: use_depthwise: Whether to use depthwise separable conv instead of regular conv. kernel_size: A list of length 2: [kernel_height, kernel_width] of the filters. Can be an int if both values are the same. padding: One of 'VALID' or 'SAME'. stride: A list of length 2: [stride_height, stride_width], specifying the convolution stride. Can be an int if both strides are the same. layer_name: String. The name of the layer. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. is_training: Indicates whether the feature generator is in training mode. freeze_batchnorm: Bool. Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. depth: Depth of output feature maps. Returns: A list of conv layers. """ layers = [] if use_depthwise: kwargs = conv_hyperparams.params() # Both the regularizer and initializer apply to the depthwise layer, # so we remap the kernel_* to depthwise_* here. kwargs['depthwise_regularizer'] = kwargs['kernel_regularizer'] kwargs['depthwise_initializer'] = kwargs['kernel_initializer'] layers.append( tf.keras.layers.SeparableConv2D( depth, [kernel_size, kernel_size], depth_multiplier=1, padding=padding, strides=stride, name=layer_name + '_depthwise_conv', **kwargs)) else: layers.append(tf.keras.layers.Conv2D( depth, [kernel_size, kernel_size], padding=padding, strides=stride, name=layer_name + '_conv', **conv_hyperparams.params())) layers.append( conv_hyperparams.build_batch_norm( training=(is_training and not freeze_batchnorm), name=layer_name + '_batchnorm')) layers.append( conv_hyperparams.build_activation_layer( name=layer_name)) return layers
3,043
def hard_reset(): """saves an empty pickle [], so you can rerun on all .JSONs""" instance = event_handler() instance.pickle_save(database_dir,file_name="daily_list.pkl")
3,044
def select_eps_for_division(dtype):
    """Selects default values for epsilon to make divisions safe based on dtype.

    This function returns an epsilon slightly greater than the smallest positive
    floating number that is representable for the given dtype. This is mainly used
    to prevent division by zero, which produces Inf values. However, if the
    numerator is orders of magnitude greater than `1.0`, eps should also be
    increased accordingly. Only floating types are supported.

    Args:
        dtype: The `tf.DType` of the tensor to which eps will be added.

    Raises:
        ValueError: If `dtype` is not a floating type.

    Returns:
        A `float` to be used to make operations safe.
    """
    return 10.0 * np.finfo(dtype.as_numpy_dtype).tiny
3,045
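A quick check of `select_eps_for_division` above; this sketch assumes TensorFlow is available as `tf` and NumPy as `np`, matching the function's use of `dtype.as_numpy_dtype`.

import numpy as np
import tensorflow as tf

eps = select_eps_for_division(tf.float32)
# 10x the smallest positive normal float32, roughly 1.18e-37
print(eps, eps > np.finfo(np.float32).tiny)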
def train_freezed_model(x, y, x_tk, y_tk, freezed_comp='encoder', use_check_point=False): """ train the translation model and save checkpoint :param x: Preprocessed English data :param y: Preprocessed French data :param x_tk: English tokenizer :param y_tk: French tokenizer :param freezed_comp: which component in the model is freezed :param use_check_point: whether save model weights to file or not """ mode_constructor = freezed_encoder_model if freezed_comp == 'encoder' else freezed_decoder_model model = mode_constructor(x.shape, y.shape[1], len(x_tk.word_index) + 1, len(y_tk.word_index) + 1) model.summary() checkpoint_path = f"freezed_translator_checkpoint_dir/freezed_{freezed_comp}/cp.ckpt" checkpoint_dir = os.path.dirname(checkpoint_path) if use_check_point and os.listdir(checkpoint_dir).__len__() > 0: latest_cp = tf.train.latest_checkpoint(checkpoint_dir) print(f'loading last model from {latest_cp}') model.load_weights(latest_cp) with open(checkpoint_dir + '/' + 'summary', 'r') as f: summary = json.load(f) return model, summary else: # Create a callback that saves the model's weights cp_callback = keras.callbacks.ModelCheckpoint(filepath=checkpoint_path, save_weights_only=True, verbose=1) # Train the model with the new callback summary = model.fit(x, y, batch_size=1024, epochs=25, validation_split=0.2, callbacks=[cp_callback]) # Pass callback to training with open(checkpoint_dir + '/' + 'summary', 'w') as f: json.dump(summary.history, f) return model, summary.history
3,046
def bpm_to_mspt(bpm, res=480):
    """
    Converts an integer value of beats per minute to milliseconds per tick,
    given a resolution of `res` ticks per quarter note
    """
    return 60000 / res / bpm
3,047
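A small worked example for `bpm_to_mspt` above: 60000 ms per minute, divided by the tick resolution and the tempo.

print(bpm_to_mspt(120))          # 60000 / 480 / 120 = 1.0416666666666667 ms per tick
print(bpm_to_mspt(100, res=96))  # 60000 / 96 / 100 = 6.25 ms per tick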
def Test_frcnn(test_images_list, network_arch, config_filename, preprocessing_function = None, num_rois = None, final_classification_threshold = 0.8): """ Test the object detection network test_images_list --list: list containing path to test_images (No default) network_arc --object: the full faster rcnn network .py file passed as an object (no default) config_filename --str: Full path to the config_file.pickle, generated while training (No default) preprocessing_function --function: optional image preprocessing function (Default None) num_rois --int: (optional)The number of ROIs to process at once in the final classifier (Default None) if not given. The number of ROIs given while training is chosen final_classification_threshold --float: (0,1) min threshold for accepting as a detection in final classifier (Default 0.8) OUTPUT: returns the images with bboxes over layed using opencv, and a dataframe with data """ nn = network_arch assert "list" in str(type(test_images_list)),"test_images_list must be a list of paths to the test images" with open(config_filename, 'rb') as f_in: C = pickle.load(f_in) if num_rois: C.num_rois = int(num_rois) # turn off any data augmentation at test time C.use_horizontal_flips = False C.use_vertical_flips = False C.rot_90 = False def format_img_size(img, C): # utility function 1 """ formats the image size based on config """ img_min_side = float(C.im_size) (height,width,_) = img.shape if width <= height: ratio = img_min_side/width new_height = int(ratio * height) new_width = int(img_min_side) else: ratio = img_min_side/height new_width = int(ratio * width) new_height = int(img_min_side) img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC) return img, ratio def preprocess_img(img, preprocessing_function): #utility function 2 """ formats the image channels based on config """ img = img[:, :, (2, 1, 0)] #bgr to rgb if preprocessing_function: img = preprocessing_function(img) #img = np.transpose(img, (2, 0, 1)) # convert to theano img = np.expand_dims(img, axis=0) return img def format_img(img, C, preprocessing_function): # utility function 3 """ formats an image for model prediction based on config """ img, ratio = format_img_size(img, C) img = preprocess_img(img, preprocessing_function) return img, ratio # Method to transform the coordinates of the bounding box to its original size def get_real_coordinates(ratio, x1, y1, x2, y2): #utility function 4 real_x1 = int(round(x1 // ratio)) real_y1 = int(round(y1 // ratio)) real_x2 = int(round(x2 // ratio)) real_y2 = int(round(y2 // ratio)) return (real_x1, real_y1, real_x2 ,real_y2) class_mapping = C.class_mapping if 'bg' not in class_mapping: class_mapping['bg'] = len(class_mapping) class_mapping = {v: k for k, v in class_mapping.items()} print(class_mapping) class_to_color = {class_mapping[v]: np.random.randint(0, 255, 3) for v in class_mapping} # load the models input_shape_img = (None, None, 3) img_input = Input(shape=input_shape_img) roi_input = Input(shape=(None, 4)) shared_layers = nn.nn_base(img_input) num_features = shared_layers.get_shape().as_list()[3] #512 for vgg-16 feature_map_input = Input(shape=(None, None, num_features)) num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios) rpn = nn.rpn(shared_layers, num_anchors) classifier = nn.classifier(feature_map_input, roi_input, C.num_rois, len(class_mapping)) # create a keras model model_rpn = Model(img_input, rpn) model_classifier = Model([feature_map_input, roi_input], classifier) #Note: The model_classifier in training and 
testing are different. # In training model_classifier and model_rpn both have the base_nn. # while testing only model_rpn has the base_nn it returns the FM of base_nn # Thus the model_classifier has the FM and ROI as input # This id done to increase the testing speed print('Loading weights from {}'.format(C.weights_all_path)) model_rpn.load_weights(C.weights_all_path, by_name=True) model_classifier.load_weights(C.weights_all_path, by_name=True) list_of_all_images=[] df_list = [] for idx, filepath in enumerate(sorted(test_images_list)): print(os.path.basename(filepath)) img = cv2.imread(filepath) X, ratio = format_img(img, C, preprocessing_function) # get the feature maps and output from the RPN [Y1, Y2, F] = model_rpn.predict(X) R = roi_helpers.rpn_to_roi(Y1, Y2, C, K.image_dim_ordering(), overlap_thresh=C.rpn_nms_threshold,flag="test") # convert from (x1,y1,x2,y2) to (x,y,w,h) R[:, 2] -= R[:, 0] R[:, 3] -= R[:, 1] # apply the spatial pyramid pooling to the proposed regions bboxes = {} probs = {} for jk in range(R.shape[0]//C.num_rois + 1): ROIs = np.expand_dims(R[C.num_rois*jk:C.num_rois*(jk+1), :], axis=0) if ROIs.shape[1] == 0: break if jk == R.shape[0]//C.num_rois: #pad R curr_shape = ROIs.shape target_shape = (curr_shape[0],C.num_rois,curr_shape[2]) ROIs_padded = np.zeros(target_shape).astype(ROIs.dtype) ROIs_padded[:, :curr_shape[1], :] = ROIs ROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :] ROIs = ROIs_padded [P_cls, P_regr] = model_classifier.predict([F, ROIs]) for ii in range(P_cls.shape[1]): if np.max(P_cls[0, ii, :]) < final_classification_threshold or np.argmax(P_cls[0, ii, :]) == (P_cls.shape[2] - 1): continue cls_name = class_mapping[np.argmax(P_cls[0, ii, :])] if cls_name not in bboxes: bboxes[cls_name] = [] probs[cls_name] = [] (x, y, w, h) = ROIs[0, ii, :] cls_num = np.argmax(P_cls[0, ii, :]) try: (tx, ty, tw, th) = P_regr[0, ii, 4*cls_num:4*(cls_num+1)] tx /= C.classifier_regr_std[0] ty /= C.classifier_regr_std[1] tw /= C.classifier_regr_std[2] th /= C.classifier_regr_std[3] x, y, w, h = roi_helpers.apply_regr(x, y, w, h, tx, ty, tw, th) except: pass bboxes[cls_name].append([C.rpn_stride*x, C.rpn_stride*y, C.rpn_stride*(x+w), C.rpn_stride*(y+h)]) probs[cls_name].append(np.max(P_cls[0, ii, :])) probs_list = [] # new list for every image coor_list = [] # new list for every image classes_list = []# new list for every image img_name_list = []# new list for ever image for key in bboxes: bbox = np.array(bboxes[key]) new_boxes, new_probs = roi_helpers.non_max_suppression_fast(bbox, np.array(probs[key]), overlap_thresh=C.test_roi_nms_threshold,max_boxes=C.TEST_RPN_POST_NMS_TOP_N) #0.3 default threshold from original implementation for jk in range(new_boxes.shape[0]): (x1, y1, x2, y2) = new_boxes[jk,:] (real_x1, real_y1, real_x2, real_y2) = get_real_coordinates(ratio, x1, y1, x2, y2) cv2.rectangle(img,(real_x1, real_y1), (real_x2, real_y2), (int(class_to_color[key][0]), int(class_to_color[key][1]), int(class_to_color[key][2])),2) textLabel = '{}: {}'.format(key,int(100*new_probs[jk])) coor_list.append([real_x1,real_y1,real_x2,real_y2]) # get the coordinates classes_list.append(key) probs_list.append(100*new_probs[jk]) img_name_list.append(filepath) (retval,baseLine) = cv2.getTextSize(textLabel,cv2.FONT_HERSHEY_COMPLEX,1,1) textOrg = (real_x1, real_y1-0) cv2.rectangle(img, (textOrg[0] - 5, textOrg[1]+baseLine - 5), (textOrg[0]+retval[0] + 5, textOrg[1]-retval[1] - 5), (0, 0, 0), 2) cv2.rectangle(img, (textOrg[0] - 5,textOrg[1]+baseLine - 5), (textOrg[0]+retval[0] + 5, 
textOrg[1]-retval[1] - 5), (255, 255, 255), -1) cv2.putText(img, textLabel, textOrg, cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 0), 1) df = pd.DataFrame({"Image_name":img_name_list, "classes":classes_list, "pred_prob":probs_list, "x1_y1_x2_y2":coor_list}) list_of_all_images.append(cv2.cvtColor(img,cv2.COLOR_BGR2RGB)) df_list.append(df) final_df = pd.concat(df_list,ignore_index=True) return(list_of_all_images,final_df)
3,048
def pseudorandom(n, p, key): """ Pseudorandom array of integer indexes >>> pseudorandom(5, [0.5, 0.5], key=123) array([1, 0, 0, 1, 1], dtype=int8) >>> pseudorandom(10, [0.5, 0.2, 0.2, 0.1], key=5) array([0, 2, 0, 3, 0, 1, 2, 1, 0, 0], dtype=int8) """ import numpy as np p = list(p) cp = np.cumsum([0] + p) assert np.allclose(1, cp[-1]) assert len(p) < 256 x = np.random.RandomState(key).random_sample(n) out = np.empty(n, dtype='i1') for i, (low, high) in enumerate(zip(cp[:-1], cp[1:])): out[(x >= low) & (x < high)] = i return out
3,049
def next_hidden(s, A): """From a given state s, use the transition matrix A to generate the next hidden state. """ return choose_idx(A[s])
3,050
def create_network_rcnn(cls, opt): """Separate function for rcnn, which always loads weights first, no init.""" net = cls(opt) net.print_network() util.load_network_path(net, opt.fastercnn_loc, strict=True, rcnn_load=True) if len(opt.gpu_ids) > 0: assert(torch.cuda.is_available()) net.cuda() return net
3,051
def get_board_frame(window, mqtt_sender): """Builds the chessboard GUI.""" frame = ttk.Frame(window, padding=10, borderwidth=5, relief="ridge") frame.grid() frame_label = ttk.Label(frame, text="Board") get_state = ttk.Button(frame, text="Get state") get_state["command"] = lambda: handle_get_state(mqtt_sender) mqtt_sender.state = [[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]] box = [[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]] frame_label.grid() get_state.grid() hint = {"0": "A", "1": "B", "2": "C", "3": "D", "4": "E", "5": "F", "6": "G", "7": "H"} for k in range(8): note = ttk.Label(frame, text=str(hint[str(k)])) note.grid(row=0, column=k + 2) for j in range(2): note = ttk.Label(frame, text=str(j + 1)) note.grid(row=j + 1, column=1) for k in range(8): mqtt_sender.state[j][k] = tkinter.IntVar(value=1) box[j][k] = ttk.Checkbutton(frame, variable=mqtt_sender.state[j][k]) box[j][k].grid(row=j + 1, column=k + 2) note = ttk.Label(frame, text=str(j + 1)) note.grid(row=j + 1, column=10) for j in range(2, 6): note = ttk.Label(frame, text=str(j + 1)) note.grid(row=j + 1, column=1) for k in range(8): mqtt_sender.state[j][k] = tkinter.IntVar() box[j][k] = ttk.Checkbutton(frame, variable=mqtt_sender.state[j][k]) box[j][k].grid(row=j + 1, column=k + 2) note = ttk.Label(frame, text=str(j + 1)) note.grid(row=j + 1, column=10) for j in range(6, 8): note = ttk.Label(frame, text=str(j + 1)) note.grid(row=j + 1, column=1) for k in range(8): mqtt_sender.state[j][k] = tkinter.IntVar(value=1) box[j][k] = ttk.Checkbutton(frame, variable=mqtt_sender.state[j][k]) box[j][k].grid(row=j + 1, column=k + 2) note = ttk.Label(frame, text=str(j + 1)) note.grid(row=j + 1, column=10) for k in range(8): note = ttk.Label(frame, text=str(hint[str(k)])) note.grid(row=10, column=k + 2) return frame
3,052
def compute_rest_time(gps_data, radius): """Compute the duration during which the track stays in a given radius of each point. Args: gps_data (:py:class:`~gps_data_analyzer.gps_data.PoiPoints`): The data used for computation. radius (float): The radius in which the rest time is computed around each point. Returns: ``pandas.Series``: The rest time around each point. """ # TODO: need optimization and cleaning. def _t_inter(current, i1, i2, max_radius, geom="geometry", t="datetime"): d_i1 = i1[geom].distance(current[geom]) d_i2 = i2[geom].distance(current[geom]) t_i1 = i1[t] t_i2 = i2[t] dt = max(t_i1, t_i2) - min(t_i1, t_i2) dd = abs(d_i1 - d_i2) if dd == 0: return dt else: return min(1.0, abs(d_i1 - max_radius) / dd) * dt def _process_one_pt(num, points, max_radius, logger=logging): logger.debug("{}: {}".format(num, points)) data = gps_data pts = np.array(points) pts.sort() pos_i = np.argwhere(pts == num)[0][0] diff_not_one = (pts[1:] - pts[:-1]) != 1 current = data.loc[num] # TODO: make a function for inf and sup parts since the only difference is the # order of diff_not_one and the limits for label_inf_m1 and label_sup_p1 # Inf part if num > 0: if len(diff_not_one[:pos_i]) > 0: diff_not_one[0] = True pos_skip_inf = pos_i - np.argmax(np.flip(diff_not_one[:pos_i])) else: pos_skip_inf = pos_i label_inf = pts[pos_skip_inf] label_inf_m1 = max(0, pts[pos_skip_inf] - 1) inf = data.loc[label_inf] inf_m1 = data.loc[label_inf_m1] dt_inf = current["datetime"] - inf["datetime"] t_inf_inter = dt_inf + _t_inter(current, inf, inf_m1, max_radius) logger.debug("data:\n{}".format(data.loc[[num, label_inf, label_inf_m1]])) logger.debug( "distances = {}".format( data.loc[[label_inf, label_inf_m1], "geometry"].distance( current["geometry"] ) ) ) else: t_inf_inter = pd.Timedelta(0) # Sup part if num != data.index.max(): if len(diff_not_one[pos_i:]) > 0: diff_not_one[-1] = True pos_skip_sup = pos_i + np.argmax(diff_not_one[pos_i:]) else: pos_skip_sup = pos_i label_sup = pts[pos_skip_sup] label_sup_p1 = min(data.index.max(), pts[pos_skip_sup] + 1) sup = data.loc[label_sup] sup_p1 = data.loc[label_sup_p1] dt_sup = sup["datetime"] - current["datetime"] t_sup_inter = dt_sup + _t_inter(current, sup, sup_p1, max_radius) logger.debug("data:\n {}".format(data.loc[[num, label_sup, label_sup_p1]])) logger.debug( "distances = {}".format( data.loc[[label_sup, label_sup_p1], "geometry"].distance( current["geometry"] ) ) ) else: t_sup_inter = pd.Timedelta(0) logger.debug("t_inf_inter = {}".format(t_inf_inter)) logger.debug("t_sup_inter = {}".format(t_sup_inter)) return t_inf_inter, t_sup_inter # Get the closest points of each points points = np.c_[gps_data.x.ravel(), gps_data.y.ravel()] tree = spatial.KDTree(points) points = tree.data in_radius_pts = tree.query_ball_point(points, radius) # Get the times when the track leave the circle with radius = radius t_min = [] t_max = [] for num, i in enumerate(in_radius_pts): t1, t2 = _process_one_pt(num, i, radius) t_min.append(t1) t_max.append(t2) times = pd.DataFrame({"dt_min": t_min, "dt_max": t_max}, index=gps_data.index) # Compute total time duration = times["dt_min"] + times["dt_max"] # Convert time in seconds return duration.apply(pd.Timedelta.total_seconds)
3,053
def add_fundamentals_to_db(): """ Adds the fundamental data to the database from a json file :return:None """ fundFile = 'sectorAnalysis/fundamentals/combinedFundamentals.json' funds = pd.read_json(fundFile) manager = CollectionManager('10y_Fundamentals', 'AlgoTradingDB') for index, row in funds.iterrows(): document = row.to_dict() manager.insert(document, is_dictionary=True) manager.close()
3,054
def tag_resource(resourceArn=None, tags=None): """ Associates the specified tags to a resource with the specified resourceArn . If existing tags on a resource aren\'t specified in the request parameters, they aren\'t changed. When a resource is deleted, the tags associated with that resource are also deleted. See also: AWS API Documentation Exceptions :example: response = client.tag_resource( resourceArn='string', tags=[ { 'key': 'string', 'value': 'string' }, ] ) :type resourceArn: string :param resourceArn: [REQUIRED]\nThe Amazon Resource Name (ARN) of the resource to add tags to.\n :type tags: list :param tags: [REQUIRED]\nThe tags to add to the resource. A tag is an array of key-value pairs. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.\n\n(dict) --Optional metadata that you apply to a resource to assist with categorization and organization. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.\n\nkey (string) -- [REQUIRED]One part of a key-value pair that make up a tag. A key is a general label that acts like a category for more specific tag values.\n\nvalue (string) --The optional part of a key-value pair that make up a tag. A value acts as a descriptor within a tag category (key).\n\n\n\n\n :rtype: dict ReturnsResponse Syntax {} Response Structure (dict) -- Exceptions AppMesh.Client.exceptions.BadRequestException AppMesh.Client.exceptions.ForbiddenException AppMesh.Client.exceptions.InternalServerErrorException AppMesh.Client.exceptions.NotFoundException AppMesh.Client.exceptions.ServiceUnavailableException AppMesh.Client.exceptions.TooManyRequestsException AppMesh.Client.exceptions.TooManyTagsException :return: {} :returns: (dict) -- """ pass
3,055
def body(): """Get map page body. Returns: html.Div: dash layout """ graph_map = get_graph_map() if graph_map is None: return html.Div( dbc.Alert("Cannot retrieve data! Try again later!", color="danger") ) # Put everything in a dcc container and return body = dbc.Container( [ dbc.Row( dbc.Col( dbc.Card( dbc.CardBody( [ html.P( "A graph of the UK rail network generated from \ individual train movements captured from the Network Rail feeds and a subset of known fixed locations. \ Each node represents a train describer 'berth' which usually, but not always, represents a signal.\ Red nodes indicate the live locations of trains on the network, \ whilst the node size indicates the frequency of usage. Hovering over each node provides additional information.\ The graph is updated every 5 seconds. \ Only the west coast mainline central signal area (around Manchester) is considered for now." ), ] ), color="secondary", ), width={"size": 10, "offset": 1}, ) ), dbc.Row(dbc.Col(dcc.Graph(id="graph-map", figure=graph_map))), dcc.Interval( id="graph-page-interval", interval=1 * 5000, n_intervals=0, # in milliseconds ), ], fluid=True, ) return body
3,056
def plot_loss(train_loss=None, test_loss=None, epochs=None, figsize=(10, 10)): """ Plot the 3 losses (KLD, REC_LOSS, REC_LOSS + KLD) for possibly train and test data :param train_loss: array where elements are [KLD, REC_LOSS, REC_LOSS + KLD] :param test_loss: array where elements are [KLD, REC_LOSS, REC_LOSS + KLD] :param epochs: number of epochs for x axis :param figsize: plotted window width, height :return: """ fig, axs = plt.subplots(2, 1, figsize=figsize) # plot train loss if given if train_loss is not None: rec_loss = [x[1] for x in train_loss] total_loss = [x[2] for x in train_loss] kld_loss = [x[0] for x in train_loss] axs[0].plot(rec_loss, label='rec_train_loss') axs[0].plot(total_loss, label='total_train_loss') axs[1].plot(kld_loss, label='kld_train_loss') # plot test loss if given if test_loss is not None: rec_loss = [x[1] for x in test_loss] total_loss = [x[2] for x in test_loss] kld_loss = [x[0] for x in test_loss] axs[0].plot(rec_loss, label='rec_test_loss') axs[0].plot(total_loss, label='total_test_loss') axs[1].plot(kld_loss, label='kld_test_loss') plt.title("ELBO LOSS PLOT") axs[0].legend() axs[1].legend() axs[0].set_title("total and reconstructed loss") axs[1].set_title("kld loss") plt.show()
3,057
def coreg_scalp_surfaces(subject_ids, subjects_dir=None): """ Routine to generate high-resolution head surfaces for coordinate alignment Parameters ---------- subject_ids: list of strings List of subjects ids subjects_dir: string String of subject directory output: -head-dense.fif, -head-medium.fif, -head-sparse.fif """ # ----------------------------------- # import necessary modules from mne.commands import mne_make_scalp_surfaces # ----------------------------------- for id in subject_ids: try: print('Create high-resolution head surfaces for coordinate alignment for subject: %s'%id) mne_make_scalp_surfaces._run(subjects_dir, id, force=True, overwrite=True, verbose=True) except: retcode_error('mne_make_scalp_surfaces', id) continue
3,058
def custom_strftime(formatting: str, date: datetime.datetime) -> str: """Custom strftime formatting function, using fancy number suffixes (1st, 2nd, 3rd...)""" return date.strftime(formatting).replace("{S}", str(date.day) + suffix(date.day))
3,059
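A usage sketch for `custom_strftime` above. The `suffix` helper it relies on is not shown in this entry, so a typical ordinal-suffix implementation is assumed here.

import datetime

# Assumed stand-in for the missing suffix() helper: 1 -> "st", 2 -> "nd", 3 -> "rd", 11-13 -> "th", ...
def suffix(day: int) -> str:
    if 11 <= day % 100 <= 13:
        return "th"
    return {1: "st", 2: "nd", 3: "rd"}.get(day % 10, "th")

print(custom_strftime("%B {S}, %Y", datetime.datetime(2021, 3, 1)))   # March 1st, 2021
print(custom_strftime("%B {S}, %Y", datetime.datetime(2021, 3, 22)))  # March 22nd, 2021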
def get_sample_rate() -> int: """ Get current sampling rate (it may differ from the frequency specified in sv_init()) """
3,060
def make_workdir(run_dir, ccp4i2=False, MAX_WORKDIRS=100): """Make a work directory rooted at run_dir and return its path Parameters ---------- run_dir : str The path to a run directory where the job was started ccp4i2 : bool, optional Indicate if we are running under CCP4I2 Returns ------- work_dir : str The path to the working directory """ if ccp4i2: work_dir = os.path.join(run_dir, I2DIR) else: run_inc = 0 while True: work_dir = os.path.join(run_dir, AMPLEDIR + str(run_inc)) if not os.path.exists(work_dir): break run_inc += 1 if run_inc > MAX_WORKDIRS: raise RuntimeError("Too many work directories! {0}".format(work_dir)) if os.path.exists(work_dir): raise RuntimeError( "There is an existing AMPLE work directory: {0}\n" "Please delete/move it aside.".format(work_dir) ) os.mkdir(work_dir) return work_dir
3,061
def setup_twitter(config_file='config.py'): """Setup auth keys and session with Twitter client.""" config = {} execfile(config_file, config) twitter_obj = Twitter(auth=OAuth(config["access_key"], config["access_secret"], config["consumer_key"], config["consumer_secret"])) return twitter_obj
3,062
def create_datediff_test_nulls_df(): """Create DataFrame with nulls only for DateDifferenceTransformer tests.""" df = pd.DataFrame( { "a": [ datetime.datetime(1993, 9, 27, 11, 58, 58), np.NaN, ], "b": [ np.NaN, datetime.datetime(2019, 12, 25, 11, 58, 58), ], }, index=[0, 1], ) return df
3,063
def business_days_list(start_date: date, end_date: date) -> list[date]: """ business days """ us_holidays = holidays.UnitedStates() days: list[date] = [] for the_date in get_list_of_days(start_date, end_date): if (the_date.weekday() < 5) and (the_date not in us_holidays): days.append(the_date) return days
3,064
def test_3d(): """Test FE in 3D""" def setone(arr): arr[0, :, (arr.shape[0] - 1) // 2] = 1.0 return arr assert pipe( 5, lambda x: np.zeros((1, x, x, x), dtype=int), setone, solve_fe(elastic_modulus=(1.0, 10.0), poissons_ratio=(0.0, 0.0)), lambda x: np.allclose( [np.mean(x["strain"][0, ..., i]) for i in range(6)], [1.0, 0.0, 0.0, 0.0, 0.0, 0.0], ), )
3,065
def get_xray_edges(elements: List[str], wmin: float, wmax: float): """ Using xraydb, return the absorbtion edges Parameters ---------- elements: List[str] A list of the element symbols from which to query absorption edges. wmin: float The smallest wavelength edge to return wmax: float The largest wavelength edge to return Returns ------- output_table: List[str] A table containing absorption edges. - Elem: the element - Energy: the photoionisation energy - Frequency: the frequency of the absorption edge - Wavelength: the wavelength of the absorption edge """ element_absortion_edges_dicts = [] for element in elements: edges = xraydb.xray_edges(element) element_absortion_edges_dicts.append(edges) output_table = [] output_table.append("Elem {:15s} {:15s} {:15s}\n".format("Energy eV", "Frequency Hz", "Wavelength AA")) for i, edges in enumerate(element_absortion_edges_dicts): print("-" * COL_LEN) print("{}: \n".format(elements[i])) print("{:15s} {:15s} {:15s}".format("Energy eV", "Frequency Hz", "Wavelength AA")) keys = edges.keys() prev_key = "K" for key in keys: # This bit will skip edges which have the same energy, I hope if prev_key != key: if edges[prev_key][0] == edges[key][0]: continue prev_key = key energy = edges[key][0] frequency = energy / HEV wavelength = C / frequency / ANGSTROM print("{:9.1f} {:1.12e} {:13.1f}".format(energy, frequency, wavelength)) if wmin < wavelength < wmax: output_table_line = "{:4s} {:9.1f} {:1.12e} {:13.1f}\n".format( elements[i], energy, frequency, wavelength ) output_table.append(output_table_line) print() print("-" * COL_LEN) with open("xray_edges.txt", "w") as f: f.writelines(output_table) return output_table
3,066
def register(klass): """ Registers a Report Engine Report. This gets the namespace and class's slug, puts them in a tuple, and stores the report, indexed by the namespace, slug tuple. :param klass: The class of the report to register. :return: """ _registry[(klass.namespace,klass.slug)] = klass
3,067
def get_mobility_link(): """Get Apple Mobility data link """ # get link with urllib.request.urlopen(index_url) as url: json_link = json.loads(url.read().decode()) base_path = json_link['basePath'] csv_path = json_link['regions']['en-us']['csvPath'] link = site_url + \ base_path + csv_path return link
3,068
def test_wizard_requires_valid_ssid(tmpdir, monkeypatch, wizard_answers):
    """
    When an empty ssid is entered, the wizard should reject it and re-prompt
    """
    # Given
    wizard_answers.insert(3, "")  # add an invalid entry for ssid
    config_file = tmpdir.join("test.yaml")
    input_mock = MagicMock(side_effect=wizard_answers)
    monkeypatch.setattr("builtins.input", input_mock)
    monkeypatch.setattr(wz, "safe_print", lambda t=None: 0)
    monkeypatch.setattr(wz, "sleep", lambda _: 0)
    monkeypatch.setattr(wz, "wizard_write", MagicMock())

    # When
    retval = wz.wizard(str(config_file))

    # Then
    assert retval == 0
3,069
def publish():
    """
    Prepares the project locally for HTML publication on GitHub Pages to make
    the built document available to the general public.
    Only supports the default `output/html` build directory.
    Requires Git and a GitHub account.
    """
    from . import utils
    if not utils.directory_exists("output/html"):
        raise_cli_error(f"""
        The directory `output/html` does not exist.
        Maybe try `pretext build` first?
        """)
    import shutil
    shutil.rmtree("docs",ignore_errors=True)
    shutil.copytree("output/html","docs")
    click.echo("Use these instructions if your project isn't already set up with Git and GitHub:")
    click.echo("https://docs.github.com/en/github/importing-your-projects-to-github/adding-an-existing-project-to-github-using-the-command-line")
    click.echo("")
    click.echo("Be sure your repo on GitHub is set to publish from the `docs` subdirectory:")
    click.echo("https://docs.github.com/en/github/working-with-github-pages/configuring-a-publishing-source-for-your-github-pages-site")
    click.echo("")
    click.echo("Once all the above is satisfied, run the following command to update your repository and publish your built HTML on the internet:")
    click.echo("git add docs; git commit -m 'publish updated HTML'; git push")
3,070
def active_shift(app, token, gqlClient): """returns the currently active shift if it exists""" with app.test_request_context(): request.headers = {'authorization': token} query = '''mutation CreateShift($Active: Boolean!, $StartTime: String) { createShift(active: $Active, startTime: $StartTime) { shift { id startTime active } } } ''' vars = { 'StartTime': (datetime.now() - timedelta(hours=5)).strftime('%Y-%m-%d %H:%M:%S'), 'Active': True } res = gqlClient.execute(query, context_value=request, variables=vars) print("query result:", res) assert res['data']['createShift']['shift']['active'] shift = res['data']['createShift']['shift'] return shift
3,071
def func(arg, kwarg=None): """A function for general use.""" pass
3,072
def tff_cc_test_with_tf_deps(name, tf_deps = [], **kwargs): """A version of `cc_test` that links against TF statically or dynamically. Args: name: A unique name for this target. tf_deps: List of TensorFlow static dependencies. **kwargs: `cc_test` keyword arguments. """ srcs = kwargs.pop("srcs", []) deps = kwargs.pop("deps", []) native.cc_test( name = name, srcs = srcs + if_static( [], macos = [ "@org_tensorflow//tensorflow:libtensorflow_framework.2.dylib", "@org_tensorflow//tensorflow:libtensorflow_cc.2.dylib", ], otherwise = [ "@org_tensorflow//tensorflow:libtensorflow_framework.so.2", "@org_tensorflow//tensorflow:libtensorflow_cc.so.2", ], ), deps = deps + if_static( tf_deps, macos = [ "@org_tensorflow//tensorflow:libtensorflow_framework.2.8.0.dylib", "@org_tensorflow//tensorflow:libtensorflow_cc.2.8.0.dylib", ], otherwise = [ "@org_tensorflow//tensorflow:libtensorflow_framework.so.2.8.0", "@org_tensorflow//tensorflow:libtensorflow_cc.so.2.8.0", ], ), **kwargs )
3,073
def get_batch_size(input): """ Infer the mini-batch size according to `input`. Args: input (tf.Tensor): The input placeholder. Returns: int or tf.Tensor: The batch size. """ if input.get_shape() is None: batch_size = tf.shape(input)[0] else: batch_size = int_shape(input)[0] if batch_size is None: batch_size = tf.shape(input)[0] return batch_size
3,074
def _order_points(pts: np.ndarray) -> np.ndarray:
    """Extract top left, top right, bottom left, bottom right of region

    Args:
        pts (np.ndarray): The coordinates of the corner points

    Returns:
        np.ndarray: The points ordered as [top-left, top-right, bottom-right, bottom-left].
    """
    x_sorted = pts[np.argsort(pts[:, 0]), :]

    left_most = x_sorted[:2, :]
    right_most = x_sorted[2:, :]
    left_most = left_most[np.argsort(left_most[:, 1]), :]
    (tl, bl) = left_most

    distance = dist.cdist(tl[np.newaxis], right_most, "euclidean")[0]
    (br, tr) = right_most[np.argsort(distance)[::-1], :]

    return np.array([tl, tr, br, bl], dtype="float32")
3,075
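A small check of `_order_points` above, assuming `dist` refers to `scipy.spatial.distance` (as the `dist.cdist` call suggests) and `np` to NumPy.

import numpy as np
from scipy.spatial import distance as dist

corners = np.array([[10, 10], [0, 0], [10, 0], [0, 10]], dtype="float32")
print(_order_points(corners))
# [[ 0.  0.]   top-left
#  [10.  0.]   top-right
#  [10. 10.]   bottom-right
#  [ 0. 10.]]  bottom-left  (image coordinates, y growing downwards)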
def test_list_time_length_3_nistxml_sv_iv_list_time_length_4_5(mode, save_output, output_format): """ Type list/time is restricted by facet length with value 8. """ assert_bindings( schema="nistData/list/time/Schema+Instance/NISTSchema-SV-IV-list-time-length-4.xsd", instance="nistData/list/time/Schema+Instance/NISTXML-SV-IV-list-time-length-4-5.xml", class_name="NistschemaSvIvListTimeLength4", version="1.1", mode=mode, save_output=save_output, output_format=output_format, structure_style="filenames", )
3,076
def to_image(obj):
    """
    general function for inspecting objects of any type (work in progress)
    returns image (numpy array), description
    the description says what was done in order to display the image
    """
    import logging
    descr = ""
    if (tf.is_tensor(obj)):
        obj = obj.numpy()
    logger = logging.getLogger()
    old_level = logger.level
    logger.setLevel(100)
    if obj.shape:
        #print(f"Max {max(obj)}")
        if len(obj.shape) == 2:
            # grayscale image
            obj = norm(obj)
            descr += f"Grayscale Image, mean:{obj.mean()}, var:{obj.var()} \n"
            if (obj.var() < 0.01):
                descr += f"Subtracted mean {obj.mean()} \n"
                obj = obj - obj.mean()
                if (obj.mean() < 0.01):
                    i = 0
                    while (obj.mean() < 0.1 and obj.shape[0] > 10):
                        i += 1
                        obj = skimage.measure.block_reduce(obj, (2,2), np.max)
                    descr += f"Very dark image, max pooling ({i} times)"
            # convert to "rgb"
            obj = np.stack((obj,)*3, axis=-1)
            return obj, descr
        elif len(obj.shape) == 3:
            # could be an image
            if obj.shape[0] == 3:
                obj = np.transpose(obj, (1,2,0))
                descr += "channel first \n"
            if obj.shape[2] == 3:
                # regular rgb image
                obj = norm(obj)
                descr += f"Mean {obj.mean()}, Variance {obj.var()}\n"
                if (obj.var() < 0.1):
                    obj = obj - obj.mean()
                    descr += f"Subtracted mean \n"
                    if (obj.mean() < 0.1):
                        i = 0
                        while (obj.mean() < 0.1 and obj.shape[0] > 10):
                            i += 1
                            obj = skimage.measure.block_reduce(obj, (2,2,1), np.max)
                        descr += f"Image too dark, max pooling ({i} times)"
                return obj, descr
            else:
                ## feature map
                ## show a few of them
                n = math.floor(math.sqrt(obj.shape[2]/3))
                n = min(n, 8)
                f, axs = plt.subplots(n, n, figsize=(15,15))
                descr += f"{obj.shape[2]} Feature Maps with Shape {obj.shape[0:2]}"
                print(f'Showing {n*n*3} Feature Maps via RGB:')
                for i in range(n*n):
                    r = norm(obj[:,:,i*3])
                    g = norm(obj[:,:,i*3+1])
                    b = norm(obj[:,:,i*3+2])
                    axs.flat[i].set_title(f'{i*3} - {i*3+3}')
                    axs.flat[i].imshow(np.moveaxis(np.array([r,g,b]), 0, 2)) # channels first -> channels last
                    #axs.flat[i].imshow(r,cmap='gray')
                    axs.flat[i].axis('off')
        elif len(obj.shape) == 4 and obj.shape[0] == 3 and obj.shape[0] == 3:
            # convolution kernel
            descr += f"Convolution Kernel {obj.shape}"
            obj = np.transpose(obj, (2,3,0,1))
            obj = np.reshape(obj, (obj.shape[0],-1,3))
            #obj = obj[:,:,:3]
            return to_image(obj)
        else:
            print("Tensor ", obj.shape)
            print(obj)
        logger.setLevel(old_level)
    else:
        return None, "Object of type "+str(type(obj))
3,077
def start(): """ Start the daemon. """ ret = 0 cfg = 'ludolph.cfg' cfg_fp = None cfg_lo = ((os.path.expanduser('~'), '.' + cfg), (sys.prefix, 'etc', cfg), ('/etc', cfg)) config_base_sections = ('global', 'xmpp', 'webserver', 'cron', 'ludolph.bot') # Try to read config file from ~/.ludolph.cfg or /etc/ludolph.cfg for i in cfg_lo: try: cfg_fp = open(os.path.join(*i)) except IOError: continue else: break if not cfg_fp: sys.stderr.write("""\nLudolph can't start!\n You need to create a config file in one these locations: \n%s\n You can rename ludolph.cfg.example and update the required options. The example file is located in: %s\n\n""" % ( '\n'.join([os.path.join(*i) for i in cfg_lo]), os.path.dirname(os.path.abspath(__file__)))) sys.exit(1) # Read and parse configuration # noinspection PyShadowingNames def load_config(fp, reopen=False): config = RawConfigParser() if reopen: fp = open(fp.name) try: # config.readfp() is Deprecated since python 3.2 # noinspection PyDeprecation read_file = config.readfp except AttributeError: read_file = config.read_file read_file(fp) fp.close() return config config = load_config(cfg_fp) # Prepare logging configuration logconfig = { 'level': parse_loglevel(config.get('global', 'loglevel')), 'format': LOGFORMAT, } if config.has_option('global', 'logfile'): logfile = config.get('global', 'logfile').strip() if logfile: logconfig['filename'] = logfile # Daemonize if config.has_option('global', 'daemon'): if config.getboolean('global', 'daemon'): ret = daemonize() # Save pid file if config.has_option('global', 'pidfile'): try: with open(config.get('global', 'pidfile'), 'w') as fp: fp.write('%s' % os.getpid()) except Exception as ex: # Setup logging just to show this error logging.basicConfig(**logconfig) logger.critical('Could not write to pidfile (%s)\n', ex) sys.exit(1) # Setup logging logging.basicConfig(**logconfig) # All exceptions will be logged without exit def log_except_hook(*exc_info): logger.critical('Unhandled exception!', exc_info=exc_info) sys.excepthook = log_except_hook # Default configuration use_tls = True use_ssl = False address = [] # Starting logger.info('Starting Ludolph %s (%s %s)', __version__, sys.executable, sys.version.split()[0]) logger.info('Loaded configuration from %s', cfg_fp.name) # Load plugins # noinspection PyShadowingNames def load_plugins(config, reinit=False): plugins = [] for config_section in config.sections(): config_section = config_section.strip() if config_section in config_base_sections: continue # Parse other possible imports parsed_plugin = config_section.split('.') if len(parsed_plugin) == 1: modname = 'ludolph.plugins.' 
+ config_section plugin = config_section else: modname = config_section plugin = parsed_plugin[-1] logger.info('Loading plugin: %s', modname) try: # Translate super_ludolph_plugin into SuperLudolphPlugin clsname = plugin[0].upper() + re.sub(r'_+([a-zA-Z0-9])', lambda m: m.group(1).upper(), plugin[1:]) module = __import__(modname, fromlist=[clsname]) if reinit and getattr(module, '_loaded_', False): reload(module) module._loaded_ = True imported_class = getattr(module, clsname) if not issubclass(imported_class, LudolphPlugin): raise TypeError('Plugin: %s is not LudolphPlugin instance' % modname) plugins.append(Plugin(config_section, modname, imported_class)) except Exception as ex: logger.exception(ex) logger.critical('Could not load plugin: %s', modname) return plugins plugins = load_plugins(config) # XMPP connection settings if config.has_option('xmpp', 'host'): address = [config.get('xmpp', 'host'), '5222'] if config.has_option('xmpp', 'port'): address[1] = config.get('xmpp', 'port') logger.info('Connecting to jabber server %s', ':'.join(address)) else: logger.info('Using DNS SRV lookup to find jabber server') if config.has_option('xmpp', 'tls'): use_tls = config.getboolean('xmpp', 'tls') if config.has_option('xmpp', 'ssl'): use_ssl = config.getboolean('xmpp', 'ssl') # Here we go xmpp = LudolphBot(config, plugins=plugins) signal.signal(signal.SIGINT, xmpp.shutdown) signal.signal(signal.SIGTERM, xmpp.shutdown) if hasattr(signal, 'SIGHUP'): # Windows does not support SIGHUP - bug #41 # noinspection PyUnusedLocal,PyShadowingNames def sighup(signalnum, handler): if xmpp.reloading: logger.warning('Reload already in progress') else: xmpp.reloading = True try: config = load_config(cfg_fp, reopen=True) logger.info('Reloaded configuration from %s', cfg_fp.name) xmpp.prereload() plugins = load_plugins(config, reinit=True) xmpp.reload(config, plugins=plugins) finally: xmpp.reloading = False signal.signal(signal.SIGHUP, sighup) # signal.siginterrupt(signal.SIGHUP, false) # http://stackoverflow.com/a/4302037 if xmpp.client.connect(tuple(address), use_tls=use_tls, use_ssl=use_ssl): xmpp.client.process(block=True) sys.exit(ret) else: logger.error('Ludolph is unable to connect to jabber server') sys.exit(2)
3,078
def matchPP(a_string):
    """assumes a_string is a string
    returns re match object if it finds two consecutive words that start with P,
    else returns None"""
    # two consecutive words starting with P or p
    pattern = r"[Pp]\w+\s[Pp]\w+"
    result = re.search(pattern, a_string)
    return result
3,079
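A quick example for `matchPP` above, assuming `re` is imported.

import re

print(matchPP("Peter Piper picked a peck").group())  # Peter Piper
print(matchPP("no consecutive matches here"))        # None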
def setBoth(s1, s2): """ Sets both servo motors to specified number of degrees Args: s1, s2 (number): degrees for left and right servos respectively must be between -90 and 90 and will be rounded Raises: Exception if s1 or s2 is not a number Returns: None """ s1 = restrictServoDegrees(s1) s2 = restrictServoDegrees(s2) return _setServos(s1, s2)
3,080
def test_add_returns_correct_id(db_with_3_mops): """Test add_mop() affect on mop_db.count().""" db = db_with_3_mops mop_id = db.add_mop(asdict(new_mop)) assert mop_id == 4
3,081
def transfer_shfe_future_hq(date, file_path, columns_map):
    """
    Normalize each day's data to the unified standard
    :return: pd.DataFrame data in the unified standard
    """
    ret = pd.DataFrame()
    data = json.loads(file_path.read_text())
    hq_df = pd.DataFrame(data['o_curinstrument'])
    total_df = pd.DataFrame(data['o_curproduct'])

    bflag = hq_df.empty or len(hq_df.columns) < len(columns_map) or len(hq_df.columns) > 20
    if bflag:
        # the raw data file is null; it is not re-downloaded here, so the program needs to be run again
        print('shfe future hq data:{} does not exist, please rerun program!'.format(file_path.name))
        return ret

    settle_name = columns_map['settle']
    hq_df = hq_df[hq_df[settle_name] != '']
    hq_df = data_type_conversion(hq_df, 0, list(columns_map.values()), list(columns_map.keys()), date, 'shfe')
    hq_df.loc[:, 'code'] = hq_df['code'].str.strip()

    # convert commodity letter abbreviations
    hq_df['code'] = hq_df['code'].transform(lambda x: NAME2CODE_MAP['exchange'][x])
    # build the symbol
    hq_df['symbol'] = hq_df['code'] + hq_df['symbol'].transform(lambda x: convert_deliver(x, date))

    # compute the amount
    total_df['PRODUCTNAME'] = total_df['PRODUCTNAME'].str.strip()
    total_df['AVGPRICE'] = pd.to_numeric(total_df['AVGPRICE'], downcast='float')
    total_df['VOLUME'] = pd.to_numeric(total_df['VOLUME'], downcast='integer')
    total_df['TURNOVER'] = pd.to_numeric(total_df['TURNOVER'], downcast='float')
    total_df = total_df[total_df['AVGPRICE'] > 0]
    total_df['code'] = total_df['PRODUCTNAME'].transform(lambda x: NAME2CODE_MAP['exchange'][x.strip()])
    total_df['multiplier'] = total_df['TURNOVER'] / total_df['AVGPRICE'] / total_df['VOLUME'] * 100000000
    total_df['multiplier'] = total_df['multiplier'].transform(round)
    hq_df = hq_df.join(total_df[['code', 'multiplier']].set_index('code'), on='code')
    hq_df['amount'] = hq_df['volume'] * hq_df['settle'] * hq_df['multiplier']
    del hq_df['multiplier']
    return hq_df
3,082
def comp_material_bsdf(arg_material_one:bpy.types.Material, arg_material_two:bpy.types.Material) -> bool:
    """Compare the BSDF nodes of the specified materials

    Compares the Principled BSDF nodes connected to the output node of each
    of the given materials.
    Returns True if the default values of the compared input sockets are
    enabled and all of them are identical.

    Args:
        arg_material_one (bpy.types.Material): first material to compare
        arg_material_two (bpy.types.Material): second material to compare

    Returns:
        bool: comparison result (True if they match)
    """

    # Check whether a Principled BSDF node is connected to the material's output node
    if check_surface_bsdf(arg_material_one) == False:
        # Not a Principled BSDF, so stop here and return False
        return False

    # Check whether a Principled BSDF node is connected to the material's output node
    if check_surface_bsdf(arg_material_two) == False:
        # Not a Principled BSDF, so stop here and return False
        return False

    # Get the Principled BSDF node
    get_node_one = get_node_linkoutput(arg_material_one)

    # Get the Principled BSDF node
    get_node_two = get_node_linkoutput(arg_material_two)

    # Comparison result flag (defaults to "match")
    comp_result = True

    # Check every input socket that is subject to comparison
    for bsdfnode_inputname in def_comp_bsdfnode_input_list:
        # Get the socket info whose default value is enabled
        nodesocket_one = get_nodesocket_enabledefault(arg_node=get_node_one, arg_inputname=bsdfnode_inputname)
        nodesocket_two = get_nodesocket_enabledefault(arg_node=get_node_two, arg_inputname=bsdfnode_inputname)

        # Confirm that socket info with an enabled default value could be retrieved
        if ((nodesocket_one == None) or (nodesocket_two == None)):
            # If the socket info could not be retrieved, treat it as a mismatch and stop checking
            comp_result = False
            break

        # Confirm that the socket types are identical
        if (type(nodesocket_one) != type(nodesocket_two)):
            # If they are not identical, treat it as a mismatch and stop checking
            comp_result = False
            break

        # Flag marking that a per-type value comparison was performed
        checked_flg = False

        # Compare NodeSocketFloat sockets
        if isinstance(nodesocket_one, bpy.types.NodeSocketFloat):
            # Compare whether the values match
            if (nodesocket_one.default_value != nodesocket_two.default_value):
                # If the values do not match, treat it as a mismatch and stop checking
                comp_result = False
                break
            else:
                # Mark that the per-type value comparison was performed
                checked_flg = True

        # Compare NodeSocketFloatFactor sockets
        if isinstance(nodesocket_one, bpy.types.NodeSocketFloatFactor):
            # Compare whether the values match
            if (nodesocket_one.default_value != nodesocket_two.default_value):
                # If the values do not match, treat it as a mismatch and stop checking
                comp_result = False
                break
            else:
                # Mark that the per-type value comparison was performed
                checked_flg = True

        # Compare NodeSocketVector sockets
        if isinstance(nodesocket_one, bpy.types.NodeSocketVector):
            # Compare whether the values match
            if ((nodesocket_one.default_value[0] != nodesocket_two.default_value[0])
                    or (nodesocket_one.default_value[1] != nodesocket_two.default_value[1])
                    or (nodesocket_one.default_value[2] != nodesocket_two.default_value[2])):
                # If the values do not match, treat it as a mismatch and stop checking
                comp_result = False
                break
            else:
                # Mark that the per-type value comparison was performed
                checked_flg = True

        # Compare NodeSocketColor sockets
        if isinstance(nodesocket_one, bpy.types.NodeSocketColor):
            # Compare whether the values match
            if ((nodesocket_one.default_value[0] != nodesocket_two.default_value[0])
                    or (nodesocket_one.default_value[1] != nodesocket_two.default_value[1])
                    or (nodesocket_one.default_value[2] != nodesocket_two.default_value[2])
                    or (nodesocket_one.default_value[3] != nodesocket_two.default_value[3])):
                # If the values do not match, treat it as a mismatch and stop checking
                comp_result = False
                break
            else:
                # Mark that the per-type value comparison was performed
                checked_flg = True

        # Confirm that a value comparison was performed
        if checked_flg == False:
            # If no matching type was found, assume the node is not a BSDF, treat it as a mismatch and stop checking
            comp_result = False
            break

    return comp_result
3,083
def run_filters(): """Runs filters ('PAINS', 'ZINC', 'BRENK', 'NIH')for molecule selected. Saves the information to the global molecule_info dict and returns the information as its own dict. Pass R Group IDs as queries: /filters?r1=A01&r2=B01 :returns: A json dictionary of the molecule, indexed by the concatenated string of its R Group IDs, with the values for each descriptor, with each key being its respective descriptor label. :rtype: json dict """ filter_names = ['PAINS', 'ZINC', 'BRENK', 'NIH'] r_group_1_id = request.args.get('r1') r_group_2_id = request.args.get('r2') drug_mol = FinalMolecule(r_group_1_id, r_group_2_id) drug_filters = drug_mol.filter_properties() molecule_key = tuple2str((r_group_1_id, r_group_2_id)) filt_dict = {} filt_dict[molecule_key] = {} for label in filter_names: if "filters" in molecule_info[molecule_key].keys(): pass else: molecule_info[molecule_key]["filters"] = {} molecule_info[molecule_key]["filters"][label] = drug_filters[label] filt_dict[molecule_key][label] = drug_filters[label] return jsonify({"filter_dict": filt_dict})
3,084
def read_template(engine, template_name): """Read template string from file and get path.""" template_file = get_template_file(engine, template_name) template_string = template_file.read_text() return template_string, template_file.parent
3,085
def test_init(): """Checks the proper imports and initialisation""" assert True
3,086
def get_qbert_v3_url(qbert_url, project_id): """Keystone only hands out a v1 url I need v3.""" qbert_v3_url = "{0}/v3/{1}".format(qbert_url[0:-3], project_id) return qbert_v3_url
3,087
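A trivial example for `get_qbert_v3_url` above, with a hypothetical Keystone-style v1 endpoint and project id.

url = get_qbert_v3_url("https://du.example.com/qbert/v1", "0123456789abcdef")
print(url)  # https://du.example.com/qbert/v3/0123456789abcdef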
def gen_all_holds(hand): """ Generate all possible choices of dice from hand to hold. hand: sorted full yahtzee hand Returns a set of tuples, where each tuple is sorted dice to hold """ # start off with the original hand in set set_holds = set([(hand)]) # now iterate with all sub hands with one element removed for item in hand: list_hand = list(hand) list_hand.remove(item) # add to set_holds this sub hand set_holds.add(tuple(list_hand)) # also add to set_holds the recursion of this sub hand # set functionality also takes care of repeated sub hands set_holds.update(gen_all_holds(tuple(list_hand))) return set_holds
3,088
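A small worked example for `gen_all_holds` above: every way to hold dice from the hand (2, 3), including holding nothing.

print(sorted(gen_all_holds((2, 3))))
# [(), (2,), (2, 3), (3,)]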
def add_features_for_one_doc(features, tokens, segment_ids, input_mask, masked_lm_positions, masked_lm_labels, masked_lm_weights, tokenizer, doc_index): """Add features for one document in a WikiDocPair example.""" input_ids = tokenizer.convert_tokens_to_ids(tokens) features["input_ids_" + doc_index] = utils.create_int_feature(input_ids) features["input_mask_" + doc_index] = utils.create_int_feature(input_mask) features["segment_ids_" + doc_index] = utils.create_int_feature(segment_ids) if masked_lm_labels: masked_lm_ids = tokenizer.convert_tokens_to_ids(masked_lm_labels) features["masked_lm_positions_" + doc_index] = utils.create_int_feature(masked_lm_positions) features["masked_lm_ids_" + doc_index] = utils.create_int_feature(masked_lm_ids) features["masked_lm_weights_" + doc_index] = utils.create_float_feature(masked_lm_weights)
3,089
def events_generators_pool_run(id_): """ Periodically activates generators according to generator pool settings. """ IOLoop.current().add_callback(_events_generators_pool_run, id_)
3,090
def sndrcv(*args, **kwargs): # type: (*Any, **Any) -> Tuple[SndRcvList, PacketList] """Scapy raw function to send a packet and receive its answer. WARNING: This is an internal function. Using sr/srp/sr1/srp is more appropriate in many cases. """ sndrcver = SndRcvHandler(*args, **kwargs) return sndrcver.results()
3,091
def set_project_designer(value: str) -> None: """ Args: value (str): value """
3,092
def get_by_name(db_session: Session, *, name: str) -> Optional[Action]: """Return action object based on action name. Arguments: db_session {Session} -- SQLAlchemy Session object name {str} -- action name Returns: Optional[Action] -- Returns a Action object or nothing if it doesn't exist """ return db_session.query(Action).filter(Action.name == name).first()
3,093
def langstring(value: str, language: str = "x-none") -> dict: """Langstring.""" return { "langstring": { "lang": language, "#text": value, } }
3,094
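The shape of the mapping returned by `langstring` above, shown with hypothetical values.

print(langstring("Photosynthesis"))
# {'langstring': {'lang': 'x-none', '#text': 'Photosynthesis'}}
print(langstring("Fotosíntesis", language="es"))
# {'langstring': {'lang': 'es', '#text': 'Fotosíntesis'}}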
def chinese_half2full(): """Convert all halfwidth Chinese characters to fullwidth . Returns: """ def string_op(input_str:str): rstring = "" for uchar in input_str: u_code = ord(uchar) if u_code == 32: u_code = 12288 elif 33 <= u_code <= 126: u_code += 65248 rstring += chr(u_code) return rstring return string_op
3,095
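Note that `chinese_half2full` above returns a converter function rather than a string; a quick usage sketch:

convert = chinese_half2full()
print(convert("Hello, 123!"))
# 'Ｈｅｌｌｏ，　１２３！'  (halfwidth ASCII mapped to fullwidth, space to U+3000)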
def euclidean_distance(p1, p2):
    """
    Returns the Euclidean distance between two points p1 and p2,
    computed over all but the last element of each point.
    """
    distance = 0
    for i in range(len(p1)-1):
        distance += (p1[i]-p2[i])**2
    return sqrt(distance)
3,096
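A quick check of `euclidean_distance` above, assuming `sqrt` comes from the math module. Because the loop stops at `len(p1) - 1`, the last element of each point is ignored, so these behave as the 2-D points (0, 0) and (3, 4).

from math import sqrt

print(euclidean_distance([0, 0, "label_a"], [3, 4, "label_b"]))  # 5.0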
def get_arguments():
    """Parse command line arguments"""
    parser = argparse.ArgumentParser(description="""A simple popup calendar""")
    parser.add_argument(
        "-p",
        "--print",
        help="print date to stdout instead of opening a note",
        action="store_true",
    )
    parser.add_argument(
        "-f",
        "--format",
        help="""option '-p' output format (datetime.strftime format, default='%%Y-{%%m}-%%d')""",
        dest="format",
        default="%Y-%m-%d",
    )
    parser.add_argument(
        "-e",
        "--editor",
        help="""editor command to open notes""",
        dest="editor",
        default="xdg-open",
    )
    parser.add_argument(
        "-l",
        "--locale",
        help="""force system locale, for example '-l es_ES.utf8'""",
        dest="locale",
        default="",
    )
    parser.add_argument(
        "-c",
        "--read-cache",
        dest="is_force_read_cache",
        action="store_true",
        help="""force calendar to read old date from cache"""
    )
    parser.add_argument(
        "-t",
        "--theme",
        help="""set calendar theme, default=classic_dark (theme file name without extension)""",
        dest="theme"
    )

    args, unknown = parser.parse_known_args()
    unknown = unknown if len(unknown) == 0 else "".join(unknown).strip(' ')
    return args, unknown
3,097
def default_after_handler(req, resp, resp_validation_error, instance): """ default handler called after the response validation :param req: request provided by the web framework :param resp: response from the endpoint function (if there is no validation error) or response validation error :param resp_validation_error: response validation error :param instance: class instance if the endpoint function is a class method """ if resp_validation_error: logger.info( '500 Response Validation Error', extra={ 'spectree_model': resp_validation_error.model.__name__, 'spectree_validation': resp_validation_error.errors(), }, )
3,098
def img_histogram(file): """ Returns an image's histogram in a combined RGB channel and each individual channel as an array of 256 values. A 0 means that a tonal value is the max and 255 means there are 0 pixels at that value. """ with Image.open(file) as img: histogram = img.histogram() red_histogram = histogram[0:256] red_max = max(red_histogram) green_histogram = histogram[256:512] green_max = max(green_histogram) blue_histogram = histogram[512:768] blue_max = max(blue_histogram) rgb_histogram = [] for i in range(256): rgb_histogram.append(red_histogram[i] + green_histogram[i] + blue_histogram[i]) rgb_max = max(rgb_histogram) for i in range(256): r = red_histogram[i] g = green_histogram[i] b = blue_histogram[i] rgb = rgb_histogram[i] rgb_histogram[i] = round(255 - (rgb * 255 / rgb_max), 2) red_histogram[i] = round(255 - (r * 255 / red_max), 2) green_histogram[i] = round(255 - (g * 255 / green_max), 2) blue_histogram[i] = round(255 - (b * 255 / blue_max), 2) return rgb_histogram, red_histogram, green_histogram, blue_histogram
3,099
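A usage sketch for `img_histogram` above, assuming Pillow is available; a hypothetical solid-colour test image is built in memory so no file on disk is needed (Image.open accepts a file-like object).

from io import BytesIO
from PIL import Image

# Hypothetical 10x10 solid-colour test image kept in memory
buf = BytesIO()
Image.new("RGB", (10, 10), (200, 30, 10)).save(buf, format="PNG")
buf.seek(0)

rgb, red, green, blue = img_histogram(buf)
print(red[200])  # 0.0 -> tonal value 200 is the red channel's peak on the inverted scale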